From 1a1172c6d1b29df6d90a8487a13827f077af51ce Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Tue, 29 Jun 2021 18:45:22 +0900
Subject: [PATCH 01/63] local code sync

---
 .../pruning/transformer_pruning.py            | 528 ++++++++++++++++++
 .../pruning/transformers/run_local.sh         |  35 ++
 .../pytorch/pruning/transformer_pruner.py     | 137 +++++
 .../transformer_pruning_head_masker.py        | 252 +++++++++
 4 files changed, 952 insertions(+)
 create mode 100644 examples/model_compress/pruning/transformer_pruning.py
 create mode 100755 examples/model_compress/pruning/transformers/run_local.sh
 create mode 100644 nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
 create mode 100644 nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py

diff --git a/examples/model_compress/pruning/transformer_pruning.py b/examples/model_compress/pruning/transformer_pruning.py
new file mode 100644
index 0000000000..96bb0b0ff9
--- /dev/null
+++ b/examples/model_compress/pruning/transformer_pruning.py
@@ -0,0 +1,528 @@
+# code adapted from https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification
+
+import argparse
+import logging
+import math
+import os
+import random
+
+import datasets
+from datasets import load_dataset, load_metric
+import torch
+from torch.utils.data.dataloader import DataLoader
+from tqdm.auto import tqdm
+
+import transformers
+from accelerate import Accelerator
+from transformers import (
+    AdamW,
+    AutoConfig,
+    AutoModel,
+    AutoModelForPreTraining,
+    AutoModelForSequenceClassification,
+    AutoTokenizer,
+    DataCollatorWithPadding,
+    PretrainedConfig,
+    SchedulerType,
+    default_data_collator,
+    get_scheduler,
+    set_seed,
+)
+
+import nni
+from nni.compression.pytorch import ModelSpeedup
+from nni.algorithms.compression.pytorch.pruning import (
+    TransformerHeadPruner
+)
+
+
+logger = logging.getLogger('bert_pruning_example')
+
+task_to_keys = {
+    "cola": ("sentence", None),
+    "mnli": ("premise", "hypothesis"),
+    "mrpc": ("sentence1", "sentence2"),
+    "qnli": ("question", "sentence"),
+    "qqp": ("question1", "question2"),
+    "rte": ("sentence1", "sentence2"),
+    "sst2": ("sentence", None),
+    "stsb": ("sentence1", "sentence2"),
+    "wnli": ("sentence1", "sentence2"),
+}
+
+
+############################
+# TODO: simplify this later
+############################
+def parse_args():
+    parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
+    parser.add_argument(
+        "--n_heads_to_prune_per_layer",
+        type=int,
+        default=None,
+        help="Number of attention heads to prune in each layer."
+    )
+    parser.add_argument(
+        "--task_name",
+        type=str,
+        default=None,
+        help="The name of the glue task to train on.",
+        choices=list(task_to_keys.keys()),
+    )
+    parser.add_argument(
+        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
+    )
+    parser.add_argument(
+        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
+    )
+    parser.add_argument(
+        "--max_length",
+        type=int,
+        default=128,
+        help=(
+            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
+            " sequences shorter will be padded if `--pad_to_max_length` is passed."
+        ),
+    )
+    parser.add_argument(
+        "--pad_to_max_length",
+        action="store_true",
+        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
+    )
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+        required=True,
+    )
+    parser.add_argument(
+        "--use_slow_tokenizer",
+        action="store_true",
+        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+    )
+    parser.add_argument(
+        "--per_device_train_batch_size",
+        type=int,
+        default=8,
+        help="Batch size (per device) for the training dataloader.",
+    )
+    parser.add_argument(
+        "--per_device_eval_batch_size",
+        type=int,
+        default=8,
+        help="Batch size (per device) for the evaluation dataloader.",
+    )
+    parser.add_argument(
+        "--learning_rate",
+        type=float,
+        default=5e-5,
+        help="Initial learning rate (after the potential warmup period) to use.",
+    )
+    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
+    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
+    parser.add_argument(
+        "--max_train_steps",
+        type=int,
+        default=None,
+        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
+    )
+    parser.add_argument(
+        "--gradient_accumulation_steps",
+        type=int,
+        default=1,
+        help="Number of update steps to accumulate before performing a backward/update pass.",
+    )
+    parser.add_argument(
+        "--lr_scheduler_type",
+        type=SchedulerType,
+        default="linear",
+        help="The scheduler type to use.",
+        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
+    )
+    parser.add_argument(
+        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
+    )
+    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
+    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
+    args = parser.parse_args()
+
+    # Sanity checks
+    if args.task_name is None and args.train_file is None and args.validation_file is None:
+        raise ValueError("Need either a task name or a training/validation file.")
+    else:
+        if args.train_file is not None:
+            extension = args.train_file.split(".")[-1]
+            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
+        if args.validation_file is not None:
+            extension = args.validation_file.split(".")[-1]
+            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
+
+    if args.output_dir is not None:
+        os.makedirs(args.output_dir, exist_ok=True)
+
+    return args
+
+
+def get_raw_dataset(args):
+    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
+    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
+
+    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
+    # sentences in columns called 'sentence1' and 'sentence2' if such columns exist, or the first two columns not
+    # named label if at least two columns are provided.
+
+    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
+    # single column. You can easily tweak this behavior (see below)
+
+    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+    # download the dataset.
+    if args.task_name is not None:
+        # Downloading and loading a dataset from the hub.
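+        # (illustration: load_dataset("glue", "sst2") returns a DatasetDict with "train",
+        # "validation" and "test" splits, each example holding the sentence text and an integer label)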
+        raw_datasets = load_dataset("glue", args.task_name)
+    else:
+        # Loading the dataset from local csv or json file.
+        data_files = {}
+        if args.train_file is not None:
+            data_files["train"] = args.train_file
+        if args.validation_file is not None:
+            data_files["validation"] = args.validation_file
+        extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1]
+        raw_datasets = load_dataset(extension, data_files=data_files)
+    # See more about loading any type of standard or custom dataset at
+    # https://huggingface.co/docs/datasets/loading_datasets.html.
+
+    # Labels
+    if args.task_name is not None:
+        is_regression = args.task_name == "stsb"
+        if not is_regression:
+            label_list = raw_datasets["train"].features["label"].names
+            num_labels = len(label_list)
+        else:
+            label_list = None
+            num_labels = 1
+    else:
+        # Trying to have good defaults here, don't hesitate to tweak to your needs.
+        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
+        if is_regression:
+            label_list = None
+            num_labels = 1
+        else:
+            # A useful fast method:
+            # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
+            label_list = raw_datasets["train"].unique("label")
+            label_list.sort()  # Let's sort it for determinism
+            num_labels = len(label_list)
+
+    return raw_datasets, is_regression, label_list, num_labels
+
+
+def preprocess_dataset(args, tokenizer, model, raw_datasets, num_labels, is_regression, label_list):
+    # Preprocessing the datasets
+    if args.task_name is not None:
+        sentence1_key, sentence2_key = task_to_keys[args.task_name]
+    else:
+        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
+        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
+        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
+            sentence1_key, sentence2_key = "sentence1", "sentence2"
+        else:
+            if len(non_label_column_names) >= 2:
+                sentence1_key, sentence2_key = non_label_column_names[:2]
+            else:
+                sentence1_key, sentence2_key = non_label_column_names[0], None
+
+    # Some models have set the order of the labels to use, so let's make sure we do use it.
+    label_to_id = None
+    if (
+        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
+        and args.task_name is not None
+        and not is_regression
+    ):
+        # Some have all caps in their config, some don't.
+        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
+        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
+            logger.info(
+                f"The configuration of the model provided the following label correspondence: {label_name_to_id}. "
+                "Using it!"
+            )
+            label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
+        else:
+            logger.warning(
+                "Your model seems to have been trained with labels, but they don't match the dataset: "
+                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
+ "\nIgnoring the model labels as a result.", + ) + elif args.task_name is None: + label_to_id = {v: i for i, v in enumerate(label_list)} + + padding = "max_length" if args.pad_to_max_length else False + + def preprocess_function(examples): + # Tokenize the texts + texts = ( + (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) + ) + result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) + + if "label" in examples: + if label_to_id is not None: + # Map labels to IDs (not necessary for GLUE tasks) + result["labels"] = [label_to_id[l] for l in examples["label"]] + else: + # In all cases, rename the column to labels because the model will expect that. + result["labels"] = examples["label"] + return result + + processed_datasets = raw_datasets.map( + preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names + ) + return processed_datasets + + +def train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, + accelerator): + # Only show the progress bar once on each machine. + progress_bar = tqdm(range(args.max_train_steps), position=0, leave=True, + disable=not accelerator.is_local_main_process) + completed_steps = 0 + + for epoch in range(args.num_train_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / args.gradient_accumulation_steps + accelerator.backward(loss) + if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + progress_bar.update(1) + completed_steps += 1 + + if completed_steps >= args.max_train_steps: + break + + model.eval() + for step, batch in enumerate(eval_dataloader): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze() + metric.add_batch( + predictions=accelerator.gather(predictions), + references=accelerator.gather(batch["labels"]), + ) + + eval_metric = metric.compute() + logger.info(f"epoch {epoch}: {eval_metric}") + + +def final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator): + # Final evaluation on mismatched validation set + eval_dataset = processed_datasets["validation_mismatched"] + eval_dataloader = DataLoader( + eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size + ) + eval_dataloader = accelerator.prepare(eval_dataloader) + + model.eval() + for step, batch in enumerate(eval_dataloader): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + metric.add_batch( + predictions=accelerator.gather(predictions), + references=accelerator.gather(batch["labels"]), + ) + + eval_metric = metric.compute() + logger.info(f"mnli-mm: {eval_metric}") + + +def get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset, accelerator): + # DataLoaders creation: + if args.pad_to_max_length: + # If padding was already done ot max length, we use the default data collator that will just convert everything + # to tensors. + data_collator = default_data_collator + else: + # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of + # the samples passed). 
When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple + # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). + data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) + + train_dataloader = DataLoader( + train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size + ) + eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) + + # Optimizer + # Split weights in two groups, one with weight decay and the other not. + no_decay = ["bias", "LayerNorm.weight"] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": args.weight_decay, + }, + { + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }, + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) + + return accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader), data_collator + + +def main(): + args = parse_args() + + ######################################################################### + # Prepare model, tokenizer, dataset, optimizer, and the scheduler + + # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. + accelerator = Accelerator() + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state) + + # Setup logging, we only want one process per machine to log things on the screen. + # accelerator.is_local_main_process is only True for one process per machine. + logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + raw_datasets, is_regression, label_list, num_labels = get_raw_dataset(args) + + # Load pretrained model and tokenizer + # + # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab. 
+    config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
+    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
+    model = AutoModelForSequenceClassification.from_pretrained(
+        args.model_name_or_path,
+        from_tf=bool(".ckpt" in args.model_name_or_path),
+        config=config,
+    )
+
+    processed_datasets = preprocess_dataset(args, tokenizer, model, raw_datasets, num_labels, is_regression, label_list)
+    train_dataset = processed_datasets["train"]
+    eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"]
+    # Log a few random samples from the training set:
+    for index in random.sample(range(len(train_dataset)), 3):
+        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
+
+    (model, optimizer, train_dataloader, eval_dataloader), data_collator = get_dataloader_and_optimizer(args, tokenizer,
+                                                                                                        model,
+                                                                                                        train_dataset,
+                                                                                                        eval_dataset,
+                                                                                                        accelerator)
+
+    # Scheduler and math around the number of training steps.
+    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+    if args.max_train_steps is None:
+        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
+    else:
+        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+
+    lr_scheduler = get_scheduler(
+        name=args.lr_scheduler_type,
+        optimizer=optimizer,
+        num_warmup_steps=args.num_warmup_steps,
+        num_training_steps=args.max_train_steps,
+    )
+
+    # Get the metric function
+    if args.task_name is not None:
+        metric = load_metric("glue", args.task_name)
+    else:
+        metric = load_metric("accuracy")
+
+    #########################################################################
+    # Finetune before pruning
+    """
+    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+    logger.info("***** Finetuning before pruning *****")
+    logger.info(f"  Num examples = {len(train_dataset)}")
+    logger.info(f"  Num Epochs = {args.num_train_epochs}")
+    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
+    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
+    logger.info(f"  Total optimization steps = {args.max_train_steps}")
+    train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric,
+                accelerator)
+
+    if args.output_dir is not None:
+        accelerator.wait_for_everyone()
+        torch.save(model, args.output_dir + '/entire_model_before_pruning.pt')
+        # unwrapped_model = accelerator.unwrap_model(model)
+        # unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
+
+    if args.task_name == "mnli":
+        final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator)
+    """
+
+    #########################################################################
+    # Pruning
+    kwargs_final = {'num_iterations': 6, 'epochs_per_iteration': 1, 'head_hidden_dim': 64,
+                    'trainer': 1, 'optimizer': 2, 'criterion': 3}
+
+    attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
+
+    kwargs = {'ranking_criteria': 'l1_weight',
+              'attention_name_groups': attention_name_groups,
+              'head_hidden_dim': 64}
+
+    config_list = [{
+        'sparsity': 0.25,
+        'op_types': ["Linear"],
+        'op_names': [x for layer in attention_name_groups for x in layer]
+    }]
+
+    pruner = TransformerHeadPruner(model.bert, config_list, **kwargs)
+    pruner.compress()
+
+    exit()
+
+    #########################################################################
+    # After pruning, finetune again on the target task
+    # re-initialize the optimizer and the scheduler
+    (model, optimizer, _, _), data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset,
+                                                                           eval_dataset, accelerator)
+    lr_scheduler = get_scheduler(
+        name=args.lr_scheduler_type,
+        optimizer=optimizer,
+        num_warmup_steps=args.num_warmup_steps,
+        num_training_steps=args.max_train_steps,
+    )
+
+    logger.info("***** Finetuning after Pruning *****")
+    train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric,
+                accelerator)
+
+    if args.output_dir is not None:
+        accelerator.wait_for_everyone()
+        torch.save(model, args.output_dir + '/entire_model_after_pruning.pt')
+        # unwrapped_model = accelerator.unwrap_model(model)
+        # unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
+
+    if args.task_name == "mnli":
+        final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/model_compress/pruning/transformers/run_local.sh b/examples/model_compress/pruning/transformers/run_local.sh
new file mode 100755
index 0000000000..cf22706158
--- /dev/null
+++ b/examples/model_compress/pruning/transformers/run_local.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Usage: ./run_local.sh script gpu_id task n_heads_per_layer
+
+export CUDA_VISIBLE_DEVICES=$2
+SOURCE_CODE=$1
+TASK_NAME=$3
+N_HEADS_PER_LAYER=$4
+PRETRAINED_MODEL='bert-base-uncased'    # 'distilbert-base-uncased' 'roberta-base' 'bert-base-cased' 'bert-base-uncased'
+MAX_LENGTH=128
+BATCH_SIZE=32
+LR=2e-5
+N_EPOCHS=3
+SEED=2021
+
+time=$(date "+%Y%m%d%H%M%S")
+OUTDIR="models_${PRETRAINED_MODEL}_${SOURCE_CODE}_prune${N_HEADS_PER_LAYER}_${TASK_NAME}_$time/"
+
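+# Example invocation (hypothetical paths; arguments as documented in the usage line above):
+#   ./run_local.sh transformer_pruning.py 0 sst2 2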
+TASK_LIST=('cola' 'sst2' 'mrpc' 'stsb' 'qqp' 'mnli' 'qnli' 'rte' 'wnli')
+if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then
+    mkdir $OUTDIR
+    python $SOURCE_CODE \
+           --n_heads_to_prune_per_layer $N_HEADS_PER_LAYER\
+           --seed $SEED \
+           --model_name_or_path $PRETRAINED_MODEL \
+           --task_name $TASK_NAME \
+           --max_length $MAX_LENGTH \
+           --per_device_train_batch_size $BATCH_SIZE \
+           --learning_rate $LR \
+           --num_train_epochs $N_EPOCHS \
+           --output_dir $OUTDIR \
+           2>&1 | tee "$OUTDIR/output.log"
+else
+    echo "Unsupported task $TASK_NAME."
+fi
diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
new file mode 100644
index 0000000000..cde90b0c1e
--- /dev/null
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
@@ -0,0 +1,137 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import logging
+from schema import And, Optional
+
+from nni.compression.pytorch.utils.config_validation import CompressorSchema
+from nni.compression.pytorch.compressor import Pruner
+from . import L1WeightHeadMasker, L2WeightHeadMasker
+
+__all__ = ['TransformerHeadPruner']
+
+MASKER_DICT = {
+    'l1_weight': L1WeightHeadMasker,
+    'l2_weight': L2WeightHeadMasker
+}
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class TransformerHeadPruner(Pruner):
+    """
+    A pruner specialized for pruning attention heads in models belonging to the transformer family.
+
+    Parameters
+    ----------
+    model : torch.nn.Module
+        Model to be pruned. Expects a model from the transformers library (e.g., BertModel).
+        This pruner can work with other customized transformer models, but some ranking modes might fail.
+    config_list : list
+        Supported keys:
+            - sparsity : Target sparsity, i.e., the fraction of attention heads to prune in each targeted layer.
+            - op_types : Optional. Operation types to prune. (Should be 'Linear' for this pruner.)
+            - op_names : Optional. Operation names to prune.
+    ranking_criteria : str
+        Supported criteria:
+            - 'taylor'
+            - 'l1_weight'
+            - 'l2_weight'
+            - 'l1_activation'
+            - 'l2_activation'
+    """
+
+    def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylor', **algo_kwargs):
+        super().__init__(model, config_list)
+
+        self.ranking_criteria = ranking_criteria
+        self.attention_name_groups = attention_name_groups
+        self.masker = MASKER_DICT[ranking_criteria](model, self, **algo_kwargs)
+        self.set_wrappers_attribute("mask_calculated", False)
+
+        # Group generation: one group per attention layer, four weights per group
+        self.masking_groups = []
+        if self.attention_name_groups is not None:
+            logger.info("Note: weights for the same attention layer are grouped using the given attention_name_groups.")
+            self.group_weights_by_name()
+        else:
+            logger.info("Note: weights for the same attention layer are grouped using model graph.")
+            self.group_weights_by_graph()
+
+        # Group sanity check
+        self.validate_weight_groups()
+
+    def group_weights_by_name(self):
+        """
+        Populate self.masking_groups using the groups specified by user in attention_name_groups.
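+        For example, following the BERT naming scheme used in the example script, one group may look like
+        ['encoder.layer.0.attention.self.query', 'encoder.layer.0.attention.self.key',
+        'encoder.layer.0.attention.self.value', 'encoder.layer.0.attention.output.dense'].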
+ """ + assert len(self.masking_groups) == 0 + # build up masking groups + name2group = {} + for layer_idx, layer in enumerate(self.attention_name_groups): + errmsg = 'each name group must contain 4 weights in the following order: query projection, key ' \ + 'projection, value projection, and fully connected output layer' + assert len(layer) == 4, errmsg + self.masking_groups.append([]) + for weight in layer: + name2group[weight] = layer_idx + # assign wrappers to these groups + for wrapper in self.get_modules_wrapper(): + if wrapper.name in name2group: + wrapper.group_idx = name2group[wrapper.name] + self.masking_groups[name2group[wrapper.name]].append(wrapper) + + # TODO: graph-based group inference + def group_weights_by_graph(self): + """ + Populate self.masking_groups bu running inference on the module graph. + """ + pass + + # TODO: some sanity checks - weight shape agreement (including head_hidden_dim parameter)? sparsity agreement? + def validate_weight_groups(self): + pass + + def validate_config(self, model, config_list): + """ + Parameters + ---------- + model : torch.nn.Module + Model to be pruned + config_list : list + List on pruning configs + """ + schema = CompressorSchema([{ + 'sparsity': And(float, lambda n: 0 < n < 1), + Optional('op_types'): [str], + Optional('op_names'): [str] + }], model, logger) + + schema.validate(config_list) + + def update_mask(self): + for layer_weight_group in self.masking_groups: + masks = self._calc_mask(layer_weight_group[0], layer_weight_group) + if masks is not None: + for i, mask in enumerate(masks): + for mask_type in mask: + assert hasattr(layer_weight_group[i], mask_type), \ + "there is no attribute '%s' in wrapper on %s" % (mask_type, layer_weight_group[i]) + setattr(layer_weight_group[i], mask_type, mask[mask_type]) + print(f'updated {layer_weight_group[i].name} {mask_type}') + + def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): + if not wrapper.mask_calculated: + sparsity = wrapper.config['sparsity'] + masks = self.masker.calc_mask(sparsity=sparsity, wrapper=wrapper, weight_group=weight_group, + wrapper_idx=wrapper_idx) + # masker.calc_mask returns None means calc_mask is not calculated successfully; can try later + if masks is not None: + wrapper.mask_calculated = True + return masks + else: + return None + + def calc_mask(self, wrapper, **kwargs): + raise RuntimeError("Applications should directly call TransformerHeadPruner's update_mask() method.") diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py new file mode 100644 index 0000000000..8e1b365da0 --- /dev/null +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -0,0 +1,252 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import logging +import torch +from .weight_masker import WeightMasker + +__all__ = ['L1WeightHeadMasker', 'L2WeightHeadMasker'] + +logger = logging.getLogger('torch transformer head pruners') + + +class AttentionHeadMasker(WeightMasker): + """ + A structured pruning masker base class that prunes convolutional layer filters. 
+
+    Parameters
+    ----------
+    model: nn.Module
+        model to be pruned
+    pruner: Pruner
+        A Pruner instance used to prune the model
+    head_hidden_dim: int
+        Hidden dimension for each attention head (e.g., 64 for BERT base)
+    """
+    def __init__(self, model, pruner, head_hidden_dim=None):
+        super().__init__(model, pruner)
+        self.head_hidden_dim = head_hidden_dim
+        assert self.head_hidden_dim is not None, "head_hidden_dim must be specified."
+
+    def calc_mask(self, sparsity, wrapper, wrapper_idx=None, **depen_kwargs):
+        """
+        Calculate the mask for `wrapper`.
+
+        Parameters
+        ----------
+        sparsity: float/list of float
+            The target sparsity of the wrapper. If we calculate the mask in
+            the normal way, then sparsity is a float number. In contrast, if
+            we calculate the mask in the dependency-aware way, sparsity is a
+            list of float numbers, each float number corresponds to a sparsity
+            of a layer.
+        wrapper: PrunerModuleWrapper/list of PrunerModuleWrappers
+            The wrapper of the target layer. If we calculate the mask in the normal
+            way, then `wrapper` is an instance of PrunerModuleWrapper, else `wrapper`
+            is a list of PrunerModuleWrapper.
+        wrapper_idx: int/list of int
+            The index of the wrapper.
+        Returns
+        -------
+        dict
+            dictionary for storing masks, keys of the dict:
+            'weight_mask': weight mask tensor
+            'bias_mask': bias mask tensor (optional)
+        """
+        mask, weight, num_prune = self._get_current_state(sparsity, wrapper, wrapper_idx)
+        num_total = weight.size(0) // self.head_hidden_dim
+        if num_total < 2 or num_prune < 1:
+            return mask
+        return self.get_mask(mask, weight, num_prune, wrapper, wrapper_idx, **depen_kwargs)
+
+    def _get_current_state(self, sparsity, wrapper, wrapper_idx=None):
+        """
+        Some pruners may prune the layers in an iterative way. In each pruning iteration,
+        we may get the current state of this wrapper/layer, and continue to prune this layer
+        based on the current state. This function is to get the current pruning state of the
+        target wrapper/layer.
+        Parameters
+        ----------
+        sparsity: float
+            pruning ratio, preserved weight ratio is `1 - sparsity`
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+        Returns
+        -------
+        base_mask: dict
+            dict object that stores the mask of this wrapper in this iteration, if it is the
+            first iteration, then we create a new mask with all ones. If there is already a
+            mask in this wrapper, then we return the existing mask.
+        weight: tensor
+            the current weight of this layer
+        num_prune: int
+            how many heads we should prune
+        """
+        msg = 'module type {} is not supported!'.format(wrapper.type)
+        assert wrapper.type == 'Linear', msg
+        weight = wrapper.module.weight.data
+        bias = None
+        if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None:
+            bias = wrapper.module.bias.data
+
+        if wrapper.weight_mask is None:
+            mask_weight = torch.ones(weight.size()).type_as(weight).detach()
+        else:
+            mask_weight = wrapper.weight_mask.clone()
+        if bias is not None:
+            if wrapper.bias_mask is None:
+                mask_bias = torch.ones(bias.size()).type_as(bias).detach()
+            else:
+                mask_bias = wrapper.bias_mask.clone()
+        else:
+            mask_bias = None
+        mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias}
+
+        num_total = weight.size(0) // self.head_hidden_dim
+        num_prune = int(num_total * sparsity)
+
+        # weight*mask_weight: apply base mask for iterative pruning
+        return mask, weight * mask_weight, num_prune
+
+    def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, **depen_kwargs):
+        """
+        Calculate the mask of the given layer.
+
+        Parameters
+        ----------
+        base_mask: dict
+            The basic mask with the same shape as weight; all items in the basic mask are 1.
+        weight: tensor
+            the module weight to be pruned
+        num_prune: int
+            Num of heads to prune
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+        Returns
+        -------
+        dict
+            dictionary for storing masks
+        """
+        raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__))
+
+    def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None):
+        """
+        Calculate the mask of the given layer by pruning out the heads with the lowest importance scores.
+
+        Parameters
+        ----------
+        weight_group: list
+            list of a group of weights for an attention layer
+        base_mask: dict
+            The basic mask with the same shape as weight; all items in the basic mask are 1.
+        weight: tensor
+            the module weight to be pruned
+        num_prune: int
+            Num of heads to prune
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+        Returns
+        -------
+        dict
+            dictionary for storing masks
+        """
+        device = weight.device
+
+        importance_scores = self.get_head_importance_scores(wrapper, weight_group, wrapper_idx)
+        threshold = torch.topk(importance_scores, num_prune, largest=False)[0].max()
+
+        # get q_proj, k_proj, v_proj, output_proj from the same attention layer
+        q_proj, k_proj, v_proj, output_proj = weight_group if weight_group is not None else \
+            self.pruner.masking_groups[wrapper.group_idx]
+
+        n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim
+        weight_mask_shape = q_proj.module.weight.data.view([n_heads, -1]).size()
+        bias_mask_shape = q_proj.module.bias.data.view([n_heads, -1]).size()
+
+        mask_weight = torch.gt(importance_scores, threshold).unsqueeze(-1).expand(weight_mask_shape).type_as(weight)
+        mask_bias = torch.gt(importance_scores, threshold).unsqueeze(-1).expand(bias_mask_shape).type_as(weight)
+
+        mask_weight_proj = mask_weight.view(weight.size()).detach().to(device)
+        mask_bias_proj = mask_bias.view(-1).detach().to(device) \
+            if base_mask['bias_mask'] is not None else None
+        masks_for_proj = {'weight_mask': mask_weight_proj.detach(), 'bias_mask': mask_bias_proj}
+
+        mask_weight_dense = mask_bias_proj.expand_as(output_proj.module.weight.data).detach().to(device)
+        mask_bias_dense = torch.ones_like(output_proj.module.bias.data).to(device)
+        masks_for_dense = {'weight_mask': mask_weight_dense.detach(), 'bias_mask': mask_bias_dense}
+
+        masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense]
+        return masks
+
+    def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx):
+        """
+        Calculate the importance score for each head.
+        Parameters
+        ----------
+        weight_group: list
+            list of a group of weights for an attention layer
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer
+        wrapper_idx: int
+            index of this wrapper in pruner's all wrappers
+        Returns
+        -------
+        tensor
+            Tensor that indicates the importance of each head
+        """
+        raise NotImplementedError('{} get_head_importance_scores is not implemented'.format(self.__class__.__name__))
+
+
+class L1WeightHeadMasker(AttentionHeadMasker):
+    """
+    A structured pruning algorithm that prunes the heads with the smallest weight magnitude across the query,
+    key, and value projection matrices. The L1 norm is used for magnitude calculation. Note that in this
+    implementation, the weight norms of q_proj, k_proj, v_proj from each head are averaged as the final
+    importance score for the head.
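+    Concretely, for head h the score computed below is (||q_h||_1 + ||k_h||_1 + ||v_h||_1) / 3, where q_h,
+    k_h and v_h are the head_hidden_dim rows of the query/key/value projection weights belonging to head h.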
+ """ + def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + print('calculating importance scores for wrapper', wrapper.name) + q_proj, k_proj, v_proj, _ = weight_group + + n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim + query_proj_weights = q_proj.module.weight.data.view([n_heads, -1]) + key_proj_weights = k_proj.module.weight.data.view([n_heads, -1]) + value_proj_weights = v_proj.module.weight.data.view([n_heads, -1]) + + query_norm_avg = torch.sum(torch.abs(query_proj_weights), -1) + key_norm_avg = torch.sum(torch.abs(key_proj_weights), -1) + value_norm_avg = torch.sum(torch.abs(value_proj_weights), -1) + + return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): + return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + + +class L2WeightHeadMasker(AttentionHeadMasker): + """ + A structured pruning algorithm that prunes the heads weight smallest weight magnitude for the query, head, + and key projection matrices. L2 norm is used for magnitude calculation. Note that in this implementation, weight + norms of q_proj, k_proj, v_proj from each head are summed as the final importance score for the head. + """ + def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + q_proj, k_proj, v_proj, _ = weight_group + + n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim + query_proj_weights = q_proj.module.weight.data.view([n_heads, -1]) + key_proj_weights = k_proj.module.weight.data.view([n_heads, -1]) + value_proj_weights = v_proj.module.weight.data.view([n_heads, -1]) + + query_norm_avg = torch.sum(query_proj_weights ** 2, -1) + key_norm_avg = torch.sum(key_proj_weights ** 2, -1) + value_norm_avg = torch.sum(value_proj_weights ** 2, -1) + + return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): + return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) From 5f27c3552248954f3bc9278a5f6ee3cc06c248bd Mon Sep 17 00:00:00 2001 From: Di Wu Date: Wed, 30 Jun 2021 17:54:32 +0900 Subject: [PATCH 02/63] graph-based weight grouping --- .../{ => transformers}/transformer_pruning.py | 9 +- .../pytorch/pruning/transformer_pruner.py | 64 +++++++++-- .../pytorch/utils/shape_dependency.py | 107 +++++++++++++++++- 3 files changed, 165 insertions(+), 15 deletions(-) rename examples/model_compress/pruning/{ => transformers}/transformer_pruning.py (98%) diff --git a/examples/model_compress/pruning/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py similarity index 98% rename from examples/model_compress/pruning/transformer_pruning.py rename to examples/model_compress/pruning/transformers/transformer_pruning.py index 96bb0b0ff9..fdac3323b9 100644 --- a/examples/model_compress/pruning/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -475,8 +475,8 @@ def main(): ######################################################################### # Pruning - kwargs_final = {'num_iterations': 6, 'epochs_per_iteration': 1, 'head_hidden_dim': 64, - 'trainer': 1, 'optimizer': 2, 'criterion': 3} + # kwargs_final = {'num_iterations': 6, 'epochs_per_iteration': 1, 'head_hidden_dim': 64, + # 'trainer': 1, 'optimizer': 2, 'criterion': 3} 
 
     attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
                                      ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
                                      ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
                                      ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
 
     kwargs = {'ranking_criteria': 'l1_weight',
-              'attention_name_groups': attention_name_groups,
-              'head_hidden_dim': 64}
+              # 'attention_name_groups': attention_name_groups,
+              'head_hidden_dim': 64,
+              'dummy_input': [torch.rand([1, 64, 768]).cuda(), torch.ones([1, 64]).cuda()]}  # input and mask
 
     config_list = [{
         'sparsity': 0.25,
diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
index cde90b0c1e..d24a8a1c42 100644
--- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
@@ -4,6 +4,8 @@
 import logging
 from schema import And, Optional
 
+from nni.common.graph_utils import TorchModuleGraph
+from nni.compression.pytorch.utils.shape_dependency import AttentionWeightDependency
 from nni.compression.pytorch.utils.config_validation import CompressorSchema
 from nni.compression.pytorch.compressor import Pruner
 from . import L1WeightHeadMasker, L2WeightHeadMasker
@@ -42,13 +44,18 @@ class TransformerHeadPruner(Pruner):
         - 'l2_activation'
     """
 
-    def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylor', **algo_kwargs):
-        super().__init__(model, config_list)
-
-        self.ranking_criteria = ranking_criteria
+    def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylor', dummy_input=None,
+                 **algo_kwargs):
         self.attention_name_groups = attention_name_groups
+        self.ranking_criteria = ranking_criteria
+        self.dummy_input = dummy_input
         self.masker = MASKER_DICT[ranking_criteria](model, self, **algo_kwargs)
-        self.set_wrappers_attribute("mask_calculated", False)
+        self.masking_groups = []
+
+        super().__init__(model, config_list)  # reset() called here
+
+    def reset(self, checkpoint=None):
+        super().reset(checkpoint=checkpoint)
 
         # Group generation: one group per attention layer, four weights per group
         self.masking_groups = []
@@ -56,12 +63,20 @@ def __init__(self, model, config_list, attention_name_groups=None, ranking_crite
             logger.info("Note: weights for the same attention layer are grouped using the given attention_name_groups.")
             self.group_weights_by_name()
         else:
+            assert self.dummy_input is not None
             logger.info("Note: weights for the same attention layer are grouped using model graph.")
+            self._unwrap_model()
             self.group_weights_by_graph()
+            self._wrap_model()
 
         # Group sanity check
         self.validate_weight_groups()
 
+        # Remove any mistakenly captured ungrouped modules
+        self.remove_ungrouped_modules()
+
+        self.set_wrappers_attribute("mask_calculated", False)
+
     def group_weights_by_name(self):
         """
         Populate self.masking_groups using the groups specified by user in attention_name_groups.
@@ -82,16 +97,45 @@ def group_weights_by_name(self):
             wrapper.group_idx = name2group[wrapper.name]
             self.masking_groups[name2group[wrapper.name]].append(wrapper)
 
-    # TODO: graph-based group inference
     def group_weights_by_graph(self):
         """
         Populate self.masking_groups by running inference on the module graph.
         """
-        pass
-
-    # TODO: some sanity checks - weight shape agreement (including head_hidden_dim parameter)? sparsity agreement?
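+        # Traversal sketch: walk the module tree top-down; whenever a submodule can be traced with
+        # the given dummy_input, collect its attention Q/K/V/output groups via AttentionWeightDependency;
+        # otherwise, recurse into the submodule's children.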
+        weight_names_grouped = []
+        stack = [(name, module) for name, module in self.bound_model.named_children()]
+        while stack:
+            cur_name, cur_module = stack.pop()
+            try:
+                module_graph = TorchModuleGraph(cur_module, self.dummy_input)
+                dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace)
+                weight_names_grouped.extend([[cur_name + '.' + x for x in group]
+                                             for group in dependency_tracer.dependency_sets])
+            except Exception:
+                stack.extend([(cur_name + '.' + name, module) for name, module in cur_module.named_children()])
+
+        self.attention_name_groups = weight_names_grouped
+        self.group_weights_by_name()
+
+    # TODO: more sanity checks - include head_hidden_dim parameter? sparsity agreement?
     def validate_weight_groups(self):
-        pass
+        errmsg = 'Attention weight group sanity check not passed'
+        try:
+            for group in self.masking_groups:
+                assert len(group) == 4, errmsg + ': each group must have four weights'
+                assert group[0].module.weight.size() == group[1].module.weight.size() and \
+                    group[1].module.weight.size() == group[2].module.weight.size(), \
+                    errmsg + ': the dimensions of Q, K, V projection matrices must be the same'
+                assert group[0].module.weight.size()[0] == group[3].module.weight.size()[1], \
+                    errmsg + ': the dimension of attention results must match with input for output projection'
+        except AssertionError as e:
+            raise RuntimeError(str(e))
+
+    def remove_ungrouped_modules(self):
+        """
+        Remove non-attention weights that might be captured mistakenly by a simplified config_list.
+        """
+        care_of_modules = set([x for layer in self.masking_groups for x in layer])
+        self.modules_wrapper = [x for x in self.modules_wrapper if x in care_of_modules]
 
     def validate_config(self, model, config_list):
         """
diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py
index 6c7491897b..ae499edff2 100644
--- a/nni/compression/pytorch/utils/shape_dependency.py
+++ b/nni/compression/pytorch/utils/shape_dependency.py
@@ -5,7 +5,8 @@
 import logging
 
 __all__ = ['ChannelDependency', 'GroupDependency',
-           'CatPaddingDependency', 'InputChannelDependency']
+           'CatPaddingDependency', 'InputChannelDependency',
+           'AttentionWeightDependency']
 
 CONV_TYPE = 'aten::_convolution'
 ADD_TYPES = ['aten::add', 'aten::add_']
@@ -501,3 +502,107 @@ def export(self, filepath):
     @property
     def dependency_sets(self):
         return self.dependency
+
+
+class AttentionWeightDependency(Dependency):
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        """
+        This class analyzes the attention weight dependencies between the Linear
+        layers in a model.
+
+        Parameters
+        ----------
+        model : torch.nn.Module
+            The model to be analyzed.
+        dummy_input : torch.Tensor
+            The example input data to trace the network architecture.
+        traced_model : torch._C.Graph
+            if we already have the traced graph of the target model, we do not
+            need to trace the model again.
+        """
+        super(AttentionWeightDependency, self).__init__(
+            model, dummy_input, traced_model)
+
+    def _get_parent_layers(self, node):
+        """
+        Find the nearest parent Linear layers for the target node.
+
+        Parameters
+        ----------
+        node : torch._C.Node
+            target node.
+
+        Returns
+        -------
+        parent_layers: list
+
+        """
+        parent_layers = []
+        stack = []
+        stack.append(node)
+        while stack:
+            curnode = stack.pop()
+            if curnode.op_type == 'LayerNorm':
+                continue
+            if curnode.op_type == 'Linear':
+                parent_layers.append(curnode.name)
+            if len(parent_layers) > 1:
+                continue
+            parents = self.graph.find_predecessors(curnode.unique_name)
+            parents = [self.graph.name_to_node[name] for name in parents]
+            for parent in parents:
+                stack.append(parent)
+
+        return parent_layers
+
+    def build_dependency(self):
+        """
+        Build the attention weight dependency for the Linear layers
+        in the model.
+        """
+        # unpack the tuple/list manually before analyzing the
+        # attention weight dependency
+        self.graph.unpack_manually()
+        for node in self.graph.nodes_py.nodes_op:
+            parent_layers = []
+            if node.op_type in ['Linear']:
+                parent_layers = self._get_parent_layers(node)
+                # print(node.name, parent_layers)
+            dependency_set = set(parent_layers)
+            # merge the dependencies
+            # for parent in parent_layers:
+            #     if parent in self.dependency:
+            #         dependency_set.update(self.dependency[parent])
+            # save the dependencies
+            # for _node in dependency_set:
+            #     self.dependency[_node] = dependency_set
+            self.dependency[node.name] = dependency_set
+
+    @property
+    def dependency_sets(self):
+        """
+        Get the list of the dependency set.
+
+        Returns
+        -------
+        dependency_sets : list
+            list of the dependency sets.
+            Each dependency set is a 4-element list of module names, with the first three elements being the projection
+            matrices for Q, K, V (in any order), and the last element being the dense matrix.
+        """
+        d_sets = []
+        for node in self.graph.nodes_py.nodes_op:
+            if node.op_type != 'Linear' or node.name not in self.dependency or len(self.dependency[node.name]) < 4:
+                continue
+            tmp_set = set()
+            for other in self.dependency[node.name]:
+                tmp_set.add(other)
+            tmp_set.remove(node.name)
+            res_list = list(tmp_set)
+            res_list.append(node.name)
+            d_sets.append(res_list)
+
+        return d_sets
+
+    def export(self, filepath):
+        pass

From d960426ca025ba951a67ac90064d8dc11e7fbd59 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Wed, 30 Jun 2021 18:05:09 +0900
Subject: [PATCH 03/63] fix for pipeline

---
 nni/algorithms/compression/pytorch/pruning/__init__.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nni/algorithms/compression/pytorch/pruning/__init__.py b/nni/algorithms/compression/pytorch/pruning/__init__.py
index f49cf0cb65..2d92454859 100644
--- a/nni/algorithms/compression/pytorch/pruning/__init__.py
+++ b/nni/algorithms/compression/pytorch/pruning/__init__.py
@@ -3,6 +3,7 @@
 
 from .finegrained_pruning_masker import *
 from .structured_pruning_masker import *
+from .transformer_pruning_head_masker import *
 from .one_shot_pruner import *
 from .iterative_pruner import *
 from .lottery_ticket import LotteryTicketPruner
@@ -11,3 +12,4 @@
 from .auto_compress_pruner import AutoCompressPruner
 from .sensitivity_pruner import SensitivityPruner
 from .amc import AMCPruner
+from .transformer_pruner import TransformerHeadPruner

From faedb0f56130f202400b13af9a029e3fd1855ab0 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Wed, 30 Jun 2021 18:31:17 +0900
Subject: [PATCH 04/63] pipeline related

---
 .../pytorch/pruning/transformer_pruning_head_masker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py
index 8e1b365da0..b0159fc29f 100644
--- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py
@@ -162,7 +162,7 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper,
         threshold = torch.topk(importance_scores, num_prune, largest=False)[0].max()
 
         # get q_proj, k_proj, v_proj, output_proj from the same attention layer
-        q_proj, k_proj, v_proj, output_proj = weight_group if weight_group is not None else \
+        q_proj, _, _, output_proj = weight_group if weight_group is not None else \
             self.pruner.masking_groups[wrapper.group_idx]
 
         n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim

From c62b9a1cde0f517d591f34272d04f212fc328bf5 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Thu, 1 Jul 2021 14:27:08 +0900
Subject: [PATCH 05/63] add activation-based maskers; refactor example

---
 .../transformers/{run_local.sh => run.sh}     |   6 +-
 .../transformers/transformer_pruning.py       | 160 +++++++++---------
 .../pytorch/pruning/transformer_pruner.py     |  41 +++--
 .../transformer_pruning_head_masker.py        | 106 +++++++++++-
 4 files changed, 215 insertions(+), 98 deletions(-)
 rename examples/model_compress/pruning/transformers/{run_local.sh => run.sh} (78%)

diff --git a/examples/model_compress/pruning/transformers/run_local.sh b/examples/model_compress/pruning/transformers/run.sh
similarity index 78%
rename from examples/model_compress/pruning/transformers/run_local.sh
rename to examples/model_compress/pruning/transformers/run.sh
index cf22706158..7b3f5bf96b 100755
--- a/examples/model_compress/pruning/transformers/run_local.sh
+++ b/examples/model_compress/pruning/transformers/run.sh
@@ -1,11 +1,10 @@
 #!/bin/bash
 
-# Usage: ./run_local.sh script gpu_id task n_heads_per_layer
+# Usage: ./run.sh script gpu_id task
 
 export CUDA_VISIBLE_DEVICES=$2
 SOURCE_CODE=$1
 TASK_NAME=$3
-N_HEADS_PER_LAYER=$4
 PRETRAINED_MODEL='bert-base-uncased'    # 'distilbert-base-uncased' 'roberta-base' 'bert-base-cased' 'bert-base-uncased'
 MAX_LENGTH=128
 BATCH_SIZE=32
@@ -14,13 +13,12 @@ N_EPOCHS=3
 SEED=2021
 
 time=$(date "+%Y%m%d%H%M%S")
-OUTDIR="models_${PRETRAINED_MODEL}_${SOURCE_CODE}_prune${N_HEADS_PER_LAYER}_${TASK_NAME}_$time/"
+OUTDIR="models_${PRETRAINED_MODEL}_${SOURCE_CODE}_${TASK_NAME}_$time/"
 
 TASK_LIST=('cola' 'sst2' 'mrpc' 'stsb' 'qqp' 'mnli' 'qnli' 'rte' 'wnli')
 if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then
     mkdir $OUTDIR
     python $SOURCE_CODE \
-           --n_heads_to_prune_per_layer $N_HEADS_PER_LAYER\
            --seed $SEED \
            --model_name_or_path $PRETRAINED_MODEL \
            --task_name $TASK_NAME \
diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py
index fdac3323b9..420eb8a640 100644
--- a/examples/model_compress/pruning/transformers/transformer_pruning.py
+++ b/examples/model_compress/pruning/transformers/transformer_pruning.py
@@ -13,7 +13,6 @@
 from tqdm.auto import tqdm
 
 import transformers
-from accelerate import Accelerator
 from transformers import (
     AdamW,
     AutoConfig,
@@ -51,17 +50,8 @@
 }
 
 
-############################
-# TODO: simplify this later
-############################
 def parse_args():
     parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
-    parser.add_argument(
-        "--n_heads_to_prune_per_layer",
-        type=int,
-        default=None,
-        help="Number of attention heads to prune in each layer."
- ) parser.add_argument( "--task_name", type=str, @@ -279,20 +269,21 @@ def preprocess_function(examples): return processed_datasets -def train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, - accelerator): - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), position=0, leave=True, - disable=not accelerator.is_local_main_process) +def train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device, + epoch_num=None): + progress_bar = tqdm(range(args.max_train_steps), position=0, leave=True) completed_steps = 0 - for epoch in range(args.num_train_epochs): + train_epoch = args.num_train_epochs if epoch_num is None else 1 + for epoch in range(train_epoch): model.train() for step, batch in enumerate(train_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) + loss.backward() if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() @@ -305,39 +296,59 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o model.eval() for step, batch in enumerate(eval_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze() metric.add_batch( - predictions=accelerator.gather(predictions), - references=accelerator.gather(batch["labels"]), + predictions=predictions, + references=batch["labels"], ) eval_metric = metric.compute() logger.info(f"epoch {epoch}: {eval_metric}") -def final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator): +def dry_run_no_param_update(args, model, train_dataloader, optimizer, device, epoch_num=None): + # no param update performed, just do forward and backward on the entire train data (to collect output/gradient etc.) 
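+    # (assumption behind this helper: backward() fills param.grad so that activation/gradient-based
+    # maskers can gather statistics, while optimizer.zero_grad() below discards the gradients without
+    # ever calling optimizer.step())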
+    progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True)
+    completed_steps = 0
+
+    train_epoch = args.num_train_epochs if epoch_num is None else 1
+    for epoch in range(train_epoch):
+        for step, batch in enumerate(train_dataloader):
+            for field in batch.keys():
+                batch[field] = batch[field].to(device)
+            outputs = model(**batch)
+            loss = outputs.loss
+            loss = loss / args.gradient_accumulation_steps
+            loss.backward()
+            optimizer.zero_grad()
+            progress_bar.update(1)
+            completed_steps += 1
+
+
-def final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator):
+def final_eval_for_mnli(args, model, processed_datasets, metric, data_collator):
     # Final evaluation on mismatched validation set
     eval_dataset = processed_datasets["validation_mismatched"]
     eval_dataloader = DataLoader(
         eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
     )
-    eval_dataloader = accelerator.prepare(eval_dataloader)
 
     model.eval()
     for step, batch in enumerate(eval_dataloader):
         outputs = model(**batch)
         predictions = outputs.logits.argmax(dim=-1)
         metric.add_batch(
-            predictions=accelerator.gather(predictions),
-            references=accelerator.gather(batch["labels"]),
+            predictions=predictions,
+            references=batch["labels"],
         )
 
     eval_metric = metric.compute()
     logger.info(f"mnli-mm: {eval_metric}")
 
 
-def get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset, accelerator):
+def get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset):
     # DataLoaders creation:
     if args.pad_to_max_length:
         # If padding was already done to max length, we use the default data collator that will just convert everything
         # to tensors.
         data_collator = default_data_collator
     else:
         # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
         # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
         # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
-        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
+        data_collator = DataCollatorWithPadding(tokenizer)
 
     train_dataloader = DataLoader(
         train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
     )
     eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
 
     # Optimizer
     # Split weights in two groups, one with weight decay and the other not.
     no_decay = ["bias", "LayerNorm.weight"]
     optimizer_grouped_parameters = [
         {
             "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
             "weight_decay": args.weight_decay,
         },
         {
             "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
             "weight_decay": 0.0,
         },
     ]
     optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
 
-    return accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader), data_collator
+    return model, optimizer, train_dataloader, eval_dataloader, data_collator
 
 
 def main():
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     args = parse_args()
 
     #########################################################################
     # Prepare model, tokenizer, dataset, optimizer, and the scheduler
 
-    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
-    accelerator = Accelerator()
     # Make one log on every process with the configuration for debugging.
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
- logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) - if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() + logger.setLevel(logging.INFO) + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() - # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) raw_datasets, is_regression, label_list, num_labels = get_raw_dataset(args) # Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) model = AutoModelForSequenceClassification.from_pretrained( @@ -415,6 +413,7 @@ def main(): from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) + model.to(device) processed_datasets = preprocess_dataset(args, tokenizer, model, raw_datasets, num_labels, is_regression, label_list) train_dataset = processed_datasets["train"] @@ -423,11 +422,12 @@ def main(): for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - (model, optimizer, train_dataloader, eval_dataloader), data_collator = get_dataloader_and_optimizer(args, tokenizer, + ######################################################################### + # Finetune before pruning + model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, - eval_dataset, - accelerator) + eval_dataset) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) @@ -449,44 +449,48 @@ def main(): else: metric = load_metric("accuracy") - ######################################################################### - # Finetune before pruning - """ - total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - logger.info("***** Finetuning before pruning *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, - accelerator) - - if args.output_dir is not None: - accelerator.wait_for_everyone() - torch.save(model, args.output_dir + '/entire_model_before_pruning.pt') - # unwrapped_model = accelerator.unwrap_model(model) - # unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) - - if args.task_name == "mnli": - final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator) - """ + # total_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps + # logger.info("***** Finetuning before pruning *****") + # logger.info(f" Num examples = {len(train_dataset)}") + # logger.info(f" Num Epochs = {args.num_train_epochs}") + # logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") + # logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") + # logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + # logger.info(f" Total optimization steps = {args.max_train_steps}") + # train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) + # + # if args.output_dir is not None: + # torch.save(model, args.output_dir + '/entire_model_before_pruning.pt') + # + # if args.task_name == "mnli": + # final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) ######################################################################### # Pruning # kwargs_final = {'num_iterations': 6, 'epochs_per_iteration': 1, 'head_hidden_dim': 64, # 'trainer': 1, 'optimizer': 2, 'criterion': 3} + model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, + model, + train_dataset, + eval_dataset) + + def trainer(model, optimizer, criterion, epoch): + # here criterion is embedded in the model. Upper levels can just pass None to trainer + # no param update performed, + # just do forward and backward on the entire train data (to collect output/gradient etc.) 
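        # Illustrative note: `criterion` can be None here because Hugging Face
        # sequence-classification models compute the loss internally whenever the
        # batch carries a `labels` field, i.e. (sketch):
        #
        #     outputs = model(**batch)   # batch includes 'labels'
        #     loss = outputs.loss        # already computed; no external criterion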
+ return dry_run_no_param_update(args, model, train_dataloader, optimizer, device, epoch_num=epoch) attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)], ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - kwargs = {'ranking_criteria': 'l1_weight', + kwargs = {'ranking_criteria': 'l2_activation', # 'attention_name_groups': attention_name_groups, 'head_hidden_dim': 64, - 'dummy_input': [torch.rand([1, 64, 768]).cuda(), torch.ones([1, 64]).cuda()]} # input and mask + 'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)], # input and mask + 'trainer': trainer, + 'optimizer': optimizer} config_list = [{ 'sparsity': 0.25, @@ -494,7 +498,7 @@ def main(): 'op_names': [x for layer in attention_name_groups for x in layer] }] - pruner = TransformerHeadPruner(model.bert, config_list, **kwargs) + pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() exit() @@ -502,8 +506,8 @@ def main(): ######################################################################### # After pruning, finetune again on the target task # re-initialize the optimizer and the scheduler - (model, optimizer, _, _), data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, - eval_dataset, accelerator) + model, optimizer, _, _, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, + eval_dataset) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, @@ -512,17 +516,13 @@ def main(): ) logger.info("***** Finetuning after Pruning *****") - train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, - accelerator) + train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) if args.output_dir is not None: - accelerator.wait_for_everyone() torch.save(model, args.output_dir + '/entire_model_after_pruning.pt') - # unwrapped_model = accelerator.unwrap_model(model) - # unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if args.task_name == "mnli": - final_eval_for_mnli(args, model, processed_datasets, metric, accelerator, data_collator) + final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) if __name__ == "__main__": diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index d24a8a1c42..bd7c3b310b 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -8,13 +8,16 @@ from nni.compression.pytorch.utils.shape_dependency import AttentionWeightDependency from nni.compression.pytorch.utils.config_validation import CompressorSchema from nni.compression.pytorch.compressor import Pruner -from . import L1WeightHeadMasker, L2WeightHeadMasker +from . 
import L1WeightHeadMasker, L2WeightHeadMasker, L1ActivationHeadMasker, L2ActivationHeadMasker, TaylorFOHeadMasker __all__ = ['TransformerHeadPruner'] MASKER_DICT = { 'l1_weight': L1WeightHeadMasker, - 'l2_weight': L2WeightHeadMasker + 'l2_weight': L2WeightHeadMasker, + 'l1_activation': L1ActivationHeadMasker, + 'l2_activation': L2ActivationHeadMasker, + 'taylor': TaylorFOHeadMasker } logger = logging.getLogger(__name__) @@ -45,17 +48,16 @@ class TransformerHeadPruner(Pruner): """ def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylor', dummy_input=None, + optimizer=None, trainer=None, criterion=None, **algo_kwargs): + super().__init__(model, config_list) + self.attention_name_groups = attention_name_groups self.ranking_criteria = ranking_criteria self.dummy_input = dummy_input - self.masker = MASKER_DICT[ranking_criteria](model, self, **algo_kwargs) - self.masking_groups = [] - - super().__init__(model, config_list) # reset() called here - - def reset(self, checkpoint=None): - super().reset(checkpoint=checkpoint) + self._optimizer = optimizer + self._trainer = trainer + self._criterion = criterion # Group generation: one group per attention layer, four weights per group self.masking_groups = [] @@ -76,6 +78,7 @@ def reset(self, checkpoint=None): self.remove_ungrouped_modules() self.set_wrappers_attribute("mask_calculated", False) + self.masker = MASKER_DICT[ranking_criteria](model, self, **algo_kwargs) def group_weights_by_name(self): """ @@ -97,12 +100,15 @@ def group_weights_by_name(self): wrapper.group_idx = name2group[wrapper.name] self.masking_groups[name2group[wrapper.name]].append(wrapper) + print('grouping updated:', [[x.name for x in group] for group in self.masking_groups]) + def group_weights_by_graph(self): """ Populate self.masking_groups bu running inference on the module graph. """ weight_names_grouped = [] stack = [(name, module) for name, module in self.bound_model.named_children()] + while stack: cur_name, cur_module = stack.pop() try: @@ -110,9 +116,8 @@ def group_weights_by_graph(self): dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) weight_names_grouped.extend([[cur_name + '.' + x for x in group] for group in dependency_tracer.dependency_sets]) - except: + except Exception as e: stack.extend([(cur_name + '.' 
+ name, module) for name, module in cur_module.named_children()]) - self.attention_name_groups = weight_names_grouped self.group_weights_by_name() @@ -154,6 +159,18 @@ def validate_config(self, model, config_list): schema.validate(config_list) + def compress(self): + if self.ranking_criteria in ['l1_activation', 'l2_activation']: + training = self.bound_model.training + self.bound_model.eval() + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) + self.update_mask() + self.bound_model.train(training) + elif self.ranking_criteria == 'taylor': + pass + self.update_mask() + return self.bound_model + def update_mask(self): for layer_weight_group in self.masking_groups: masks = self._calc_mask(layer_weight_group[0], layer_weight_group) @@ -163,7 +180,7 @@ def update_mask(self): assert hasattr(layer_weight_group[i], mask_type), \ "there is no attribute '%s' in wrapper on %s" % (mask_type, layer_weight_group[i]) setattr(layer_weight_group[i], mask_type, mask[mask_type]) - print(f'updated {layer_weight_group[i].name} {mask_type}') + print(f'mask updated {layer_weight_group[i].name} {mask_type}') def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): if not wrapper.mask_calculated: diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index b0159fc29f..9f18fb125f 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -5,7 +5,8 @@ import torch from .weight_masker import WeightMasker -__all__ = ['L1WeightHeadMasker', 'L2WeightHeadMasker'] +__all__ = ['L1WeightHeadMasker', 'L2WeightHeadMasker', 'L1ActivationHeadMasker', 'L2ActivationHeadMasker', + 'TaylorFOHeadMasker'] logger = logging.getLogger('torch transformer head pruners') @@ -38,7 +39,7 @@ def calc_mask(self, sparsity, wrapper, wrapper_idx=None, **depen_kwargs): The target sparsity of the wrapper. If we calculate the mask in the normal way, then sparsity is a float number. In contrast, if we calculate the mask in the dependency-aware way, sparsity is a - list of float numbers, each float number corressponds to a sparsity + list of float numbers, each float number corresponds to a sparsity of a layer. wrapper: PrunerModuleWrapper/list of PrunerModuleWrappers The wrapper of the target layer. If we calculate the mask in the normal @@ -159,6 +160,9 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, device = weight.device importance_scores = self.get_head_importance_scores(wrapper, weight_group, wrapper_idx) + if importance_scores is None: + return None + threshold = torch.topk(importance_scores, num_prune, largest=False)[0].max() # get q_proj, k_proj, v_proj, output_proj from the same attention head @@ -250,3 +254,101 @@ def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + + +class L1ActivationHeadMasker(AttentionHeadMasker): + """ + A structured pruning algorithm that prunes the heads with smallest final output value. + Note that this masker only relies on the output of the output layer of each attention layer. 
+ The masker collects the L1 norm of the last weight (output projection) in each group on the entire train set, and + prunes the heads producing the smallest output. + """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner, head_hidden_dim) + self.pruner.hook_id = self._add_activation_collector(self.pruner) + + def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + _, _, _, output_proj = weight_group + activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) + activations = torch.sum(activations, -1) + n_heads = activations.size()[0] // self.head_hidden_dim + scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu() + + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + + return scores + + def _add_activation_collector(self, pruner): + def collector(collected_activation): + def hook(module_, input_, output): + raw_activation = torch.abs(output.detach().cpu()) # L1-norm + raw_activation_reduced = torch.sum(raw_activation, [0, 1]) + collected_activation.append(raw_activation_reduced) + return hook + pruner.collected_activation = {} + pruner._fwd_hook_id += 1 + pruner._fwd_hook_handles[pruner._fwd_hook_id] = [] + + for _, _, _, output_proj in pruner.masking_groups: + pruner.collected_activation[output_proj.group_idx] = [] + handle = output_proj.register_forward_hook(collector(pruner.collected_activation[output_proj.group_idx])) + + pruner._fwd_hook_handles[pruner._fwd_hook_id].append(handle) + + return pruner._fwd_hook_id + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): + return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + + +class L2ActivationHeadMasker(AttentionHeadMasker): + """ + A structured pruning algorithm that prunes the heads with smallest final output value. + Note that this masker only relies on the output of the output layer of each attention layer. + The masker collects the L2 norm of the last weight (output projection) in each group on the entire train set, and + prunes the heads producing the smallest output. 
+ """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner, head_hidden_dim) + self.pruner.hook_id = self._add_activation_collector(self.pruner) + + def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + _, _, _, output_proj = weight_group + activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) + activations = torch.sum(activations, -1) + n_heads = activations.size()[0] // self.head_hidden_dim + scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu() + + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + + return scores + + def _add_activation_collector(self, pruner): + def collector(collected_activation): + def hook(module_, input_, output): + raw_activation = torch.abs(output.detach().cpu() ** 2) # L2-norm + raw_activation_reduced = torch.sum(raw_activation, [0, 1]) + collected_activation.append(raw_activation_reduced) + + return hook + + pruner.collected_activation = {} + pruner._fwd_hook_id += 1 + pruner._fwd_hook_handles[pruner._fwd_hook_id] = [] + + for _, _, _, output_proj in pruner.masking_groups: + pruner.collected_activation[output_proj.group_idx] = [] + handle = output_proj.register_forward_hook(collector(pruner.collected_activation[output_proj.group_idx])) + + pruner._fwd_hook_handles[pruner._fwd_hook_id].append(handle) + + return pruner._fwd_hook_id + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): + return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + + +class TaylorFOHeadMasker(AttentionHeadMasker): + pass \ No newline at end of file From 6877b643443bed0e34ca6fb6c0bf202539e2c515 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Thu, 1 Jul 2021 15:36:46 +0900 Subject: [PATCH 06/63] minor fix --- .../compression/pytorch/pruning/transformer_pruner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index bd7c3b310b..9a5848a677 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -116,7 +116,7 @@ def group_weights_by_graph(self): dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) weight_names_grouped.extend([[cur_name + '.' + x for x in group] for group in dependency_tracer.dependency_sets]) - except Exception as e: + except: stack.extend([(cur_name + '.' 
+ name, module) for name, module in cur_module.named_children()])
         self.attention_name_groups = weight_names_grouped
         self.group_weights_by_name()

From 595864e4d26b7c164a841e6657b2b284c7b21515 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Fri, 2 Jul 2021 17:38:51 +0900
Subject: [PATCH 07/63] change graph-based grouping logic

---
 .../pruning/transformers/run.sh               |   2 +-
 .../transformers/transformer_pruning.py       |   8 +-
 .../pytorch/pruning/transformer_pruner.py     |  38 ++++---
 .../pytorch/utils/shape_dependency.py         | 107 ++++++++++++------
 4 files changed, 100 insertions(+), 55 deletions(-)

diff --git a/examples/model_compress/pruning/transformers/run.sh b/examples/model_compress/pruning/transformers/run.sh
index 7b3f5bf96b..720ec75f66 100755
--- a/examples/model_compress/pruning/transformers/run.sh
+++ b/examples/model_compress/pruning/transformers/run.sh
@@ -1,6 +1,6 @@
 #!/bin/bash

-# Usage: ./run_local.sh script gpu_id task
+# Usage: ./run.sh script gpu_id task

 export CUDA_VISIBLE_DEVICES=$2
 SOURCE_CODE=$1
diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py
index 420eb8a640..53ee9d3c47 100644
--- a/examples/model_compress/pruning/transformers/transformer_pruning.py
+++ b/examples/model_compress/pruning/transformers/transformer_pruning.py
@@ -321,7 +321,6 @@ def dry_run_no_param_update(args, model, train_dataloader, optimizer, device, ep
             batch[field] = batch[field].to(device)
         outputs = model(**batch)
         loss = outputs.loss
-        loss = loss / args.gradient_accumulation_steps
         loss.backward()
         optimizer.zero_grad()
         progress_bar.update(1)
@@ -467,8 +466,6 @@ def main():

     #########################################################################
     # Pruning
-    # kwargs_final = {'num_iterations': 6, 'epochs_per_iteration': 1, 'head_hidden_dim': 64,
-    #                 'trainer': 1, 'optimizer': 2, 'criterion': 3}
     model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer,
                                                                                                       model,
                                                                                                       train_dataset,
                                                                                                       eval_dataset)
@@ -488,14 +485,15 @@ def trainer(model, optimizer, criterion, epoch):

     kwargs = {'ranking_criteria': 'l2_activation',
              # 'attention_name_groups': attention_name_groups,
              'head_hidden_dim': 64,
-              'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)],  # input and mask
+              #'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)],  # input and mask
+              'dummy_input': (next(iter(train_dataloader))['input_ids']).to(device),
              'trainer': trainer,
              'optimizer': optimizer}

     config_list = [{
         'sparsity': 0.25,
         'op_types': ["Linear"],
-        'op_names': [x for layer in attention_name_groups for x in layer]
+        # 'op_names': [x for layer in attention_name_groups for x in layer]
     }]

     pruner = TransformerHeadPruner(model, config_list, **kwargs)
diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
index 9a5848a677..e8fbea0f33 100644
--- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
@@ -106,20 +106,28 @@ def group_weights_by_graph(self):
         """
         Populate self.masking_groups by running inference on the module graph.
""" - weight_names_grouped = [] - stack = [(name, module) for name, module in self.bound_model.named_children()] - - while stack: - cur_name, cur_module = stack.pop() - try: - module_graph = TorchModuleGraph(cur_module, self.dummy_input) - dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) - weight_names_grouped.extend([[cur_name + '.' + x for x in group] - for group in dependency_tracer.dependency_sets]) - except: - stack.extend([(cur_name + '.' + name, module) for name, module in cur_module.named_children()]) - self.attention_name_groups = weight_names_grouped - self.group_weights_by_name() + try: + module_graph = TorchModuleGraph(self.bound_model, self.dummy_input) + dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) + self.attention_name_groups = dependency_tracer.dependency_sets + self.group_weights_by_name() + ''' + + stack = [(name, module) for name, module in self.bound_model.named_children()] + + while stack: + cur_name, cur_module = stack.pop() + try: + module_graph = TorchModuleGraph(cur_module, self.dummy_input) + dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) + weight_names_grouped.extend([[cur_name + '.' + x for x in group] + for group in dependency_tracer.dependency_sets]) + except: + stack.extend([(cur_name + '.' + name, module) for name, module in cur_module.named_children()]) + ''' + except Exception as e: + raise RuntimeError('Graph trace failed: please check dummy_input, or specify attention_name_groups. ' + 'Exception message: ' + str(e)) # TODO: more sanity checks - include head_hidden_dim parameter? sparsity agreement? def validate_weight_groups(self): @@ -180,7 +188,7 @@ def update_mask(self): assert hasattr(layer_weight_group[i], mask_type), \ "there is no attribute '%s' in wrapper on %s" % (mask_type, layer_weight_group[i]) setattr(layer_weight_group[i], mask_type, mask[mask_type]) - print(f'mask updated {layer_weight_group[i].name} {mask_type}') + print(f'mask updated: {layer_weight_group[i].name} {mask_type}') def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): if not wrapper.mask_calculated: diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py index ae499edff2..05503a039c 100644 --- a/nni/compression/pytorch/utils/shape_dependency.py +++ b/nni/compression/pytorch/utils/shape_dependency.py @@ -507,17 +507,16 @@ def dependency_sets(self): class AttentionWeightDependency(Dependency): def __init__(self, model=None, dummy_input=None, traced_model=None): """ - This model analyze the channel dependencies between the conv - layers in a model. + This model groups the linear layers belonging to the same attention layer in a model. Parameters ---------- model : torch.nn.Module The model to be analyzed. - data : torch.Tensor + dummy_input : torch.Tensor The example input data to trace the network architecture. traced_model : torch._C.Graph - if we alreay has the traced graph of the target model, we donnot + if we already has the traced graph of the target model, we do not need to trace the model again. """ super(AttentionWeightDependency, self).__init__( @@ -525,7 +524,7 @@ def __init__(self, model=None, dummy_input=None, traced_model=None): def _get_parent_layers(self, node): """ - Find the nearest father conv layers for the target node. + Find the nearest father linear layers for the target node. 
        Parameters
        ---------
        node : torch._C.Node
            target node.

        Returns
        -------
        parent_layers: list
-
+            nearest father linear layers for the target node.
        """
        parent_layers = []
-        stack = []
-        stack.append(node)
-        while stack:
-            curnode = stack.pop()
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type == 'Linear':
+                if curnode.name not in parent_layers:
+                    parent_layers.append(curnode.name)
+                continue
             if curnode.op_type == 'LayerNorm':
                 continue
-            if curnode.op_type == 'Linear':
-                parent_layers.append(curnode.name)
-                if len(parent_layers) > 1:
-                    continue
             parents = self.graph.find_predecessors(curnode.unique_name)
             parents = [self.graph.name_to_node[name] for name in parents]
             for parent in parents:
-                stack.append(parent)
-
+                queue.append(parent)
         return parent_layers

-    def build_dependency(self):
+    def _get_children_layers(self, node):
         """
-        Build the channel dependency for the conv layers
-        in the model.
+        Find the nearest children linear layers for the target node.
+
+        Parameters
+        ---------
+        node : torch._C.Node
+            target node.
+
+        Returns
+        -------
+        children_layers: list
+            nearest children linear layers for the target node.
         """
-        # unpack the tuple/list manually before analyze the
-        # channel dependency
+        children_layers = []
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type == 'Linear':
+                if curnode.name not in children_layers:
+                    children_layers.append(curnode.name)
+                continue
+            if curnode.op_type == 'LayerNorm':
+                continue
+            children = self.graph.find_successors(curnode.unique_name)
+            children = [self.graph.name_to_node[name] for name in children]
+            for child in children:
+                queue.append(child)
+        return children_layers
+
+    def build_dependency(self):
         self.graph.unpack_manually()
         for node in self.graph.nodes_py.nodes_op:
-            parent_layers = []
-            if node.op_type in ['Linear']:
+            layers = []
+            if node.op_type == 'aten::matmul':
                 parent_layers = self._get_parent_layers(node)
-                # print(node.name, parent_layers)
-            dependency_set = set(parent_layers)
-            # merge the dependencies
-            # for parent in parent_layers:
-            #     if parent in self.dependency:
-            #         dependency_set.update(self.dependency[parent])
-            # save the dependencies
-            # for _node in dependency_set:
-            #     self.dependency[_node] = dependency_set
-            self.dependency[node.name] = dependency_set
+                children_layers = self._get_children_layers(node)
+                if len(parent_layers) == 3 and len(children_layers) == 1:
+                    layers.extend(parent_layers)
+                    layers.extend(children_layers)
+
+            self.dependency[node.name] = layers

     @property
-    def dependency_sets(self):
+    def dependency_sets_backup(self):
         """
         Get the list of the dependency set.

@@ -604,5 +623,25 @@ def dependency_sets(self):

         return d_sets

+    @property
+    def dependency_sets(self):
+        """
+        Get the list of the dependency set.
+
+        Returns
+        -------
+        dependency_sets : list
+            list of the dependency sets.
+            Each dependency set is a 4-element list of module names, with the first three elements being the projection
+            matrices for Q, K, V (in any order), and the last element being the dense matrix.
+ """ + d_sets = [] + for node in self.graph.nodes_py.nodes_op: + if node.op_type != 'aten::matmul' or node.name not in self.dependency or len(self.dependency[node.name]) != 4: + continue + d_sets.append(self.dependency[node.name]) + + return d_sets + def export(self, filepath): pass From bd7ff9f9f2282051b6b094f495942f050f86871e Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 2 Jul 2021 17:42:10 +0900 Subject: [PATCH 08/63] remove redundant code --- .../pytorch/pruning/transformer_pruner.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index e8fbea0f33..7afaf08f5c 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -111,20 +111,7 @@ def group_weights_by_graph(self): dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) self.attention_name_groups = dependency_tracer.dependency_sets self.group_weights_by_name() - ''' - - stack = [(name, module) for name, module in self.bound_model.named_children()] - - while stack: - cur_name, cur_module = stack.pop() - try: - module_graph = TorchModuleGraph(cur_module, self.dummy_input) - dependency_tracer = AttentionWeightDependency(traced_model=module_graph.trace) - weight_names_grouped.extend([[cur_name + '.' + x for x in group] - for group in dependency_tracer.dependency_sets]) - except: - stack.extend([(cur_name + '.' + name, module) for name, module in cur_module.named_children()]) - ''' + except Exception as e: raise RuntimeError('Graph trace failed: please check dummy_input, or specify attention_name_groups. ' 'Exception message: ' + str(e)) From b28725fc473f7085e1471b002f9da32585f04a26 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 12:27:36 +0900 Subject: [PATCH 09/63] Add taylor masker --- .../transformers/transformer_pruning.py | 3 +- .../pytorch/pruning/transformer_pruner.py | 12 ++-- .../transformer_pruning_head_masker.py | 71 +++++++++++++++++-- 3 files changed, 73 insertions(+), 13 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 53ee9d3c47..6b7e7e960a 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -311,6 +311,7 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o def dry_run_no_param_update(args, model, train_dataloader, optimizer, device, epoch_num=None): # no param update performed, just do forward and backward on the entire train data (to collect output/gradient etc.) 
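    # This statistics pass feeds the Taylor-FO masker added by this commit. In
    # sketch form (our notation), the score accumulated for each head h is
    #
    #     score_h += | o_h * dL/do_h |   summed over batch, sequence and hidden dims
    #
    # where o_h is the per-head slice of the output projection's output cached by
    # the masker's forward hook (cf. Michel et al., 2019).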
+ print("Running forward and backward on the entire dataset without updating parameters...") progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) completed_steps = 0 @@ -482,7 +483,7 @@ def trainer(model, optimizer, criterion, epoch): ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - kwargs = {'ranking_criteria': 'l2_activation', + kwargs = {'ranking_criteria': 'taylorfo', # 'attention_name_groups': attention_name_groups, 'head_hidden_dim': 64, #'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)], # input and mask diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 7afaf08f5c..c557d2c7df 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -17,7 +17,7 @@ 'l2_weight': L2WeightHeadMasker, 'l1_activation': L1ActivationHeadMasker, 'l2_activation': L2ActivationHeadMasker, - 'taylor': TaylorFOHeadMasker + 'taylorfo': TaylorFOHeadMasker } logger = logging.getLogger(__name__) @@ -40,14 +40,14 @@ class TransformerHeadPruner(Pruner): - op_names : Optional. Operation names to prune. ranking_criteria : str Supported criteria: - - 'taylor' + - 'taylorfo' - 'l1_weight' - 'l2_weight' - 'l1_activation' - 'l2_activation' """ - def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylor', dummy_input=None, + def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylorfo', dummy_input=None, optimizer=None, trainer=None, criterion=None, **algo_kwargs): super().__init__(model, config_list) @@ -113,7 +113,7 @@ def group_weights_by_graph(self): self.group_weights_by_name() except Exception as e: - raise RuntimeError('Graph trace failed: please check dummy_input, or specify attention_name_groups. ' + raise RuntimeError('Graph trace failed: please check dummy_input, or specify attention_name_groups.\n' 'Exception message: ' + str(e)) # TODO: more sanity checks - include head_hidden_dim parameter? sparsity agreement? @@ -155,14 +155,12 @@ def validate_config(self, model, config_list): schema.validate(config_list) def compress(self): - if self.ranking_criteria in ['l1_activation', 'l2_activation']: + if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo']: training = self.bound_model.training self.bound_model.eval() self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) self.update_mask() self.bound_model.train(training) - elif self.ranking_criteria == 'taylor': - pass self.update_mask() return self.bound_model diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 9f18fb125f..a86e97ab69 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -260,8 +260,8 @@ class L1ActivationHeadMasker(AttentionHeadMasker): """ A structured pruning algorithm that prunes the heads with smallest final output value. Note that this masker only relies on the output of the output layer of each attention layer. 
-    The masker collects the L1 norm of the last weight (output projection) in each group on the entire train set, and
-    prunes the heads producing the smallest output.
+    The masker collects the L1 norm of the output of the last weight (output projection) in each group on the entire
+    train set, and prunes the heads producing the smallest output.
     """
     def __init__(self, model, pruner, head_hidden_dim=None):
         super().__init__(model, pruner, head_hidden_dim)
@@ -274,6 +274,7 @@ def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx):
         n_heads = activations.size()[0] // self.head_hidden_dim
         scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu()

+        # clean up hooks
         if self.pruner.hook_id in self.pruner._fwd_hook_handles:
             self.pruner.remove_activation_collector(self.pruner.hook_id)

@@ -306,8 +307,8 @@ class L2ActivationHeadMasker(AttentionHeadMasker):
     """
     A structured pruning algorithm that prunes the heads with smallest final output value.
     Note that this masker only relies on the output of the output layer of each attention layer.
-    The masker collects the L2 norm of the last weight (output projection) in each group on the entire train set, and
-    prunes the heads producing the smallest output.
+    The masker collects the L2 norm of the output of the last weight (output projection) in each group on the entire
+    train set, and prunes the heads producing the smallest output.
     """
     def __init__(self, model, pruner, head_hidden_dim=None):
         super().__init__(model, pruner, head_hidden_dim)
@@ -320,6 +321,7 @@ def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx):
         n_heads = activations.size()[0] // self.head_hidden_dim
         scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu()

+        # clean up hooks
         if self.pruner.hook_id in self.pruner._fwd_hook_handles:
             self.pruner.remove_activation_collector(self.pruner.hook_id)

@@ -351,4 +353,63 @@ def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_gr


 class TaylorFOHeadMasker(AttentionHeadMasker):
-    pass
\ No newline at end of file
+    """
+    A structured pruning algorithm that prunes the heads with smallest final output contribution.
+    Note that this masker only relies on the output of the output layer of each attention layer.
+    The masker collects the output of the last weight (output projection) in each group and the corresponding gradient
+    on the entire train set, and prunes the heads producing the smallest contribution, as used in the following papers:
+    "Are Sixteen Heads Really Better than One?" (Michel et al., 2019)
+    "Pruning convolutional neural networks for resource efficient inference." (Molchanov et
al., 2017) + """ + def __init__(self, model, pruner, head_hidden_dim=None): + super().__init__(model, pruner, head_hidden_dim) + self.pruner.hook_id = self._add_activation_collector() # forward hooks for collecting activation + self.backward_hooks = {} # backward hooks for collecting gradient + self._add_gradient_collector() + + def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + _, _, _, output_proj = weight_group + result = output_proj.head_importance_scores + + # clean up hooks and cached data + if self.pruner.hook_id in self.pruner._fwd_hook_handles: + self.pruner.remove_activation_collector(self.pruner.hook_id) + self.backward_hooks[output_proj.group_idx].remove() + for attr in ['forward_output_cached', 'head_importance_scores']: + output_proj.__dict__.pop(attr, None) + + return result + + def _add_activation_collector(self): + def forward_hook(md, inp, out): + if type(out) is tuple: + out = out[0] + n_heads_per_layer = out.size(-1) // self.head_hidden_dim + heads_output = out.view([out.size(0), out.size(1), n_heads_per_layer, -1]) + md.forward_output_cached = heads_output + + self.pruner._fwd_hook_id += 1 + self.pruner._fwd_hook_handles[self.pruner._fwd_hook_id] = [] + + for _, _, _, output_proj in self.pruner.masking_groups: + handle = output_proj.register_forward_hook(forward_hook) + self.pruner._fwd_hook_handles[self.pruner._fwd_hook_id].append(handle) + + return self.pruner._fwd_hook_id + + def _add_gradient_collector(self): + def grad_hook(md, grad_in, grad_out): + if type(grad_out) is tuple: + grad_out = grad_out[0] + n_heads_per_layer = grad_out.size(-1) // self.head_hidden_dim + heads_grad = grad_out.view([grad_out.size(0), grad_out.size(1), n_heads_per_layer, -1]) + heads_scores = torch.abs(heads_grad * md.forward_output_cached) + heads_scores = torch.sum(heads_scores, [0, 1, 3]).detach().cpu().numpy() + if hasattr(md, 'head_importance_scores'): + md.head_importance_scores += heads_scores + else: + md.head_importance_scores = heads_scores + + for _, _, _, output_proj in self.pruner.masking_groups: + handle = output_proj.register_backward_hook(grad_hook) + self.backward_hooks[output_proj.group_idx] = handle From 80bdf062a208801673c079f1f33fdf6a82e857c1 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 12:29:25 +0900 Subject: [PATCH 10/63] debug --- .../pytorch/pruning/transformer_pruning_head_masker.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index a86e97ab69..a7b0f20d44 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -413,3 +413,6 @@ def grad_hook(md, grad_in, grad_out): for _, _, _, output_proj in self.pruner.masking_groups: handle = output_proj.register_backward_hook(grad_hook) self.backward_hooks[output_proj.group_idx] = handle + + def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): + return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) \ No newline at end of file From d5582dd96f217c5573c9eea6b9d231b6b019ca07 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 12:32:58 +0900 Subject: [PATCH 11/63] debug --- .../pytorch/pruning/transformer_pruning_head_masker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index a7b0f20d44..38278ea52a 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -385,7 +385,7 @@ def forward_hook(md, inp, out): if type(out) is tuple: out = out[0] n_heads_per_layer = out.size(-1) // self.head_hidden_dim - heads_output = out.view([out.size(0), out.size(1), n_heads_per_layer, -1]) + heads_output = out.view([out.size(0), out.size(1), n_heads_per_layer, -1]).detach() md.forward_output_cached = heads_output self.pruner._fwd_hook_id += 1 @@ -404,7 +404,7 @@ def grad_hook(md, grad_in, grad_out): n_heads_per_layer = grad_out.size(-1) // self.head_hidden_dim heads_grad = grad_out.view([grad_out.size(0), grad_out.size(1), n_heads_per_layer, -1]) heads_scores = torch.abs(heads_grad * md.forward_output_cached) - heads_scores = torch.sum(heads_scores, [0, 1, 3]).detach().cpu().numpy() + heads_scores = torch.sum(heads_scores, [0, 1, 3]).detach().cpu() if hasattr(md, 'head_importance_scores'): md.head_importance_scores += heads_scores else: From 0715a7098be87cc722ce2524401b891e8a94d877 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 17:26:48 +0900 Subject: [PATCH 12/63] Add global sorting --- .../transformers/transformer_pruning.py | 1 + .../pytorch/pruning/transformer_pruner.py | 74 ++++++++++++++----- .../transformer_pruning_head_masker.py | 63 ++++++++++++++-- 3 files changed, 112 insertions(+), 26 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 6b7e7e960a..0f7bc4a589 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -484,6 +484,7 @@ def trainer(model, optimizer, criterion, epoch): ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) kwargs = {'ranking_criteria': 'taylorfo', + 'global_sort': True, # 'attention_name_groups': attention_name_groups, 'head_hidden_dim': 64, #'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)], # input and mask diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index c557d2c7df..6d2fedd207 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -47,14 +47,18 @@ class TransformerHeadPruner(Pruner): - 'l2_activation' """ - def __init__(self, model, config_list, attention_name_groups=None, ranking_criteria='taylorfo', dummy_input=None, - optimizer=None, trainer=None, criterion=None, + def __init__(self, model, config_list, attention_name_groups=None, dummy_input=None, ranking_criteria='taylorfo', + global_sort=False, head_hidden_dim=None, optimizer=None, trainer=None, criterion=None, **algo_kwargs): super().__init__(model, config_list) self.attention_name_groups = attention_name_groups - self.ranking_criteria = ranking_criteria self.dummy_input = dummy_input + self.ranking_criteria = ranking_criteria + assert self.ranking_criteria in ['l1_weight', 'l2_weight', 'l1_activation', 'l2_activation', 'taylorfo'], \ + "Unsupported ranking criteria." 
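        # A condensed sketch (ours) of the selection rule that global_sort enables
        # (implemented in calc_mask_global further below): pool (layer, head, score)
        # triples across all layers, then greedily mark the globally weakest heads,
        # never emptying a layer completely.
        #
        #     selected = 0
        #     for layer, head, _ in sorted(all_scores, key=lambda t: t[2]):
        #         if remaining_heads[layer] > 1:
        #             to_prune[layer].add(head)
        #             remaining_heads[layer] -= 1
        #             selected += 1
        #         if selected >= n_heads_to_prune:
        #             break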
+ self.global_sort = global_sort + self.head_hidden_dim = head_hidden_dim self._optimizer = optimizer self._trainer = trainer self._criterion = criterion @@ -78,7 +82,7 @@ def __init__(self, model, config_list, attention_name_groups=None, ranking_crite self.remove_ungrouped_modules() self.set_wrappers_attribute("mask_calculated", False) - self.masker = MASKER_DICT[ranking_criteria](model, self, **algo_kwargs) + self.masker = MASKER_DICT[ranking_criteria](model, self, self.head_hidden_dim, **algo_kwargs) def group_weights_by_name(self): """ @@ -116,19 +120,35 @@ def group_weights_by_graph(self): raise RuntimeError('Graph trace failed: please check dummy_input, or specify attention_name_groups.\n' 'Exception message: ' + str(e)) - # TODO: more sanity checks - include head_hidden_dim parameter? sparsity agreement? def validate_weight_groups(self): + """ + Sanity checks: + - Q, K, V projection weights in each groups must have the same shape + - output projection weight shape must match total hidden dimension (inferred from Q, K, V projection) + - Four weights in a group must have the same sparsity in their config + - If global_sort is specified, all weights must have the same sparsity + - head_hidden_dim must be a divisor of the output dimension of the projection weights + """ errmsg = 'Attention weight group sanity check not passed' - try: - for group in self.masking_groups: - assert len(group) == 4, errmsg + ': each group must have four weights' - assert group[0].module.weight.size() == group[1].module.weight.size() and \ - group[1].module.weight.size() == group[2].module.weight.size(), \ - errmsg + ': the dimensions of Q, K, V projection matrices must be the same ' - assert group[0].module.weight.size()[0] == group[3].module.weight.size()[1], \ - errmsg + ': the dimension of attention results must match with input for output projection' - except: - raise RuntimeError(errmsg) + sparsity = None + for group in self.masking_groups: + assert len(group) == 4, errmsg + ': each group must have four weights' + assert group[0].module.weight.size() == group[1].module.weight.size() and \ + group[1].module.weight.size() == group[2].module.weight.size(), \ + errmsg + ': the dimensions of Q, K, V projection matrices must be the same ' + assert group[0].module.weight.size()[0] == group[3].module.weight.size()[1], \ + errmsg + ': the dimension of attention results must match with input for output projection' + assert group[0].config['sparsity'] == group[1].config['sparsity'] == \ + group[2].config['sparsity'] == group[3].config['sparsity'], \ + errmsg + ': the sparsity of matrices in the same layer must be the same' + if sparsity is None: + sparsity = group[0].config['sparsity'] + if self.global_sort: + assert sparsity == group[0].config['sparsity'], \ + errmsg + ': for global_sort=True, the sparsity for all modules must be the same' + t = group[0].module.weight.size(0) / self.head_hidden_dim + assert t % 1 == 0, errmsg + ': head_hidden_dim must be a divisor of the output dimension of the ' \ + 'projection weights' def remove_ungrouped_modules(self): """ @@ -161,12 +181,20 @@ def compress(self): self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) self.update_mask() self.bound_model.train(training) - self.update_mask() + else: + self.update_mask() return self.bound_model def update_mask(self): - for layer_weight_group in self.masking_groups: - masks = self._calc_mask(layer_weight_group[0], layer_weight_group) + masks_for_all = None + if self.global_sort: + 
masks_for_all = self._calc_mask_global() + assert len(masks_for_all) == len(self.masking_groups) + for group_idx, layer_weight_group in enumerate(self.masking_groups): + if self.global_sort: + masks = masks_for_all[group_idx] + else: + masks = self._calc_mask(layer_weight_group[0], layer_weight_group) if masks is not None: for i, mask in enumerate(masks): for mask_type in mask: @@ -187,5 +215,15 @@ def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): else: return None + def _calc_mask_global(self): + if len(self.get_modules_wrapper()) == 0: + return [] + overall_sparsity = self.get_modules_wrapper()[0].config['sparsity'] + n_heads_total = 0 + for q_proj, _, _, _ in self.masking_groups: + n_heads_total += int(q_proj.module.weight.size()[0] / self.head_hidden_dim) + n_heads_to_prune = int(n_heads_total * overall_sparsity) + return self.masker.calc_mask_global(n_heads_to_prune) + def calc_mask(self, wrapper, **kwargs): raise RuntimeError("Applications should directly call TransformerHeadPruner's update_mask() method.") diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 38278ea52a..1c8168fbbc 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -8,7 +8,7 @@ __all__ = ['L1WeightHeadMasker', 'L2WeightHeadMasker', 'L1ActivationHeadMasker', 'L2ActivationHeadMasker', 'TaylorFOHeadMasker'] -logger = logging.getLogger('torch transformer head pruners') +logger = logging.getLogger('transformer head pruner') class AttentionHeadMasker(WeightMasker): @@ -60,6 +60,54 @@ def calc_mask(self, sparsity, wrapper, wrapper_idx=None, **depen_kwargs): return mask return self.get_mask(mask, weight, num_prune, wrapper, wrapper_idx, **depen_kwargs) + def calc_mask_global(self, n_heads_to_prune): + # calculate scores as normal (this step does not require global information) + head_importance_scores = [] + for group_idx, group in enumerate(self.pruner.masking_groups): + scores = self.get_head_importance_scores(group) + n_heads = group[0].module.weight.size(0) // self.head_hidden_dim + for head_idx in range(n_heads): + head_importance_scores.append([group_idx, head_idx, scores[head_idx]]) + + # determine which head to prune for each layer + pruning_rules = {i: set() for i in range(len(self.pruner.masking_groups))} + n_selected = 0 + for group_idx, head_idx, _ in sorted(head_importance_scores, key=(lambda x: x[-1])): + n_heads_original = self.pruner.masking_groups[group_idx][0].module.weight.size(0) // self.head_hidden_dim + n_heads_remaining = n_heads_original - len(pruning_rules[group_idx]) + if n_heads_remaining > 1: + pruning_rules[group_idx].add(head_idx) + n_selected += 1 + if n_selected >= n_heads_to_prune: + break + + # generate masks + all_masks = [] + for group_idx, group in enumerate(self.pruner.masking_groups): + device = group[0].module.weight.device + + n_heads = group[0].module.weight.size(0) // self.head_hidden_dim + weight_mask_shape = group[0].module.weight.data.view([n_heads, -1]).size() + bias_mask_shape = group[0].module.bias.data.view([n_heads, -1]).size() + + head_level_mask = torch.tensor([i not in pruning_rules[group_idx] for i in range(n_heads)], device=device) + mask_weight = head_level_mask.unsqueeze(-1).expand(weight_mask_shape).type_as(group[0].module.weight) + mask_bias = 
head_level_mask.unsqueeze(-1).expand(bias_mask_shape).type_as(group[0].module.weight) + + mask_weight_proj = mask_weight.view(group[0].module.weight.size()).detach().to(device) + mask_bias_proj = mask_bias.view(-1).detach().to(device) + masks_for_proj = {'weight_mask': mask_weight_proj.detach(), 'bias_mask': mask_bias_proj} + + mask_weight_dense = mask_bias_proj.expand_as(group[-1].module.weight.data).detach().to(device) + mask_bias_dense = torch.ones_like(group[-1].module.bias.data).to(device) + masks_for_dense = {'weight_mask': mask_weight_dense.detach(), 'bias_mask': mask_bias_dense} + + masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] + + all_masks.append(masks) + + return all_masks + def _get_current_state(self, sparsity, wrapper, wrapper_idx=None): """ Some pruner may prune the layers in a iterative way. In each pruning iteration, @@ -188,7 +236,7 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] return masks - def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + def get_head_importance_scores(self, weight_group): """ Calculate the importance score for each head. Parameters @@ -213,8 +261,7 @@ class L1WeightHeadMasker(AttentionHeadMasker): and key projection matrices. L1 norm is used for magnitude calculation. Note that in this implementation, weight norms of q_proj, k_proj, v_proj from each head are summed as the final importance score for the head. """ - def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): - print('calculating importance scores for wrapper', wrapper.name) + def get_head_importance_scores(self, weight_group): q_proj, k_proj, v_proj, _ = weight_group n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim @@ -238,7 +285,7 @@ class L2WeightHeadMasker(AttentionHeadMasker): and key projection matrices. L2 norm is used for magnitude calculation. Note that in this implementation, weight norms of q_proj, k_proj, v_proj from each head are summed as the final importance score for the head. 
""" - def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + def get_head_importance_scores(self, weight_group): q_proj, k_proj, v_proj, _ = weight_group n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim @@ -267,7 +314,7 @@ def __init__(self, model, pruner, head_hidden_dim=None): super().__init__(model, pruner, head_hidden_dim) self.pruner.hook_id = self._add_activation_collector(self.pruner) - def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + def get_head_importance_scores(self, weight_group): _, _, _, output_proj = weight_group activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) activations = torch.sum(activations, -1) @@ -314,7 +361,7 @@ def __init__(self, model, pruner, head_hidden_dim=None): super().__init__(model, pruner, head_hidden_dim) self.pruner.hook_id = self._add_activation_collector(self.pruner) - def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + def get_head_importance_scores(self, weight_group): _, _, _, output_proj = weight_group activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) activations = torch.sum(activations, -1) @@ -367,7 +414,7 @@ def __init__(self, model, pruner, head_hidden_dim=None): self.backward_hooks = {} # backward hooks for collecting gradient self._add_gradient_collector() - def get_head_importance_scores(self, wrapper, weight_group, wrapper_idx): + def get_head_importance_scores(self, weight_group): _, _, _, output_proj = weight_group result = output_proj.head_importance_scores From d1e5d8d68826cb3f95ba641b82ab0035c350d7f3 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 17:37:59 +0900 Subject: [PATCH 13/63] debug --- .../pytorch/pruning/transformer_pruning_head_masker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 1c8168fbbc..1060417d8d 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -207,7 +207,7 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, """ device = weight.device - importance_scores = self.get_head_importance_scores(wrapper, weight_group, wrapper_idx) + importance_scores = self.get_head_importance_scores(weight_group) if importance_scores is None: return None From aece26ac36acfc584256ccb8dd65a3a118cc08d6 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 17:51:13 +0900 Subject: [PATCH 14/63] debug --- .../pytorch/pruning/transformer_pruning_head_masker.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 1060417d8d..62717dabc7 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -90,7 +90,11 @@ def calc_mask_global(self, n_heads_to_prune): weight_mask_shape = group[0].module.weight.data.view([n_heads, -1]).size() bias_mask_shape = group[0].module.bias.data.view([n_heads, -1]).size() - head_level_mask = torch.tensor([i not in pruning_rules[group_idx] for i in range(n_heads)], device=device) + # a hack to avoid using 
torch.tensor, which has problems with pylint + head_level_mask = torch.ones(n_heads) + for i in pruning_rules[group_idx]: + head_level_mask[i] = 0 + mask_weight = head_level_mask.unsqueeze(-1).expand(weight_mask_shape).type_as(group[0].module.weight) mask_bias = head_level_mask.unsqueeze(-1).expand(bias_mask_shape).type_as(group[0].module.weight) From 9cf94fee3f2891ff3b5e12504e70a80d888a3a17 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 20:55:52 +0900 Subject: [PATCH 15/63] Add iterative pruning --- .../transformers/transformer_pruning.py | 17 ++- .../pytorch/pruning/transformer_pruner.py | 59 ++++++--- .../transformer_pruning_head_masker.py | 117 ++++++++++-------- 3 files changed, 115 insertions(+), 78 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 0f7bc4a589..c396dbce77 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -309,9 +309,12 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o logger.info(f"epoch {epoch}: {eval_metric}") -def dry_run_no_param_update(args, model, train_dataloader, optimizer, device, epoch_num=None): +def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=None): # no param update performed, just do forward and backward on the entire train data (to collect output/gradient etc.) - print("Running forward and backward on the entire dataset without updating parameters...") + if epoch_num == 0: + print("Running forward and backward on the entire dataset without updating parameters...") + else: + print("Finetuning for 1 epoch") progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) completed_steps = 0 @@ -323,6 +326,8 @@ def dry_run_no_param_update(args, model, train_dataloader, optimizer, device, ep outputs = model(**batch) loss = outputs.loss loss.backward() + if epoch_num != 0: + optimizer.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 @@ -476,15 +481,17 @@ def trainer(model, optimizer, criterion, epoch): # here criterion is embedded in the model. Upper levels can just pass None to trainer # no param update performed, # just do forward and backward on the entire train data (to collect output/gradient etc.) 
- return dry_run_no_param_update(args, model, train_dataloader, optimizer, device, epoch_num=epoch) + return dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=epoch) attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)], ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - kwargs = {'ranking_criteria': 'taylorfo', + kwargs = {'ranking_criteria': 'l1_activation', 'global_sort': True, + 'num_iterations': 2, + 'epochs_per_iteration': 1, # 'attention_name_groups': attention_name_groups, 'head_hidden_dim': 64, #'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)], # input and mask @@ -493,7 +500,7 @@ def trainer(model, optimizer, criterion, epoch): 'optimizer': optimizer} config_list = [{ - 'sparsity': 0.25, + 'sparsity': 0.5, 'op_types': ["Linear"], # 'op_names': [x for layer in attention_name_groups for x in layer] }] diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 6d2fedd207..62b6e79206 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -46,22 +46,27 @@ class TransformerHeadPruner(Pruner): - 'l1_activation' - 'l2_activation' """ - - def __init__(self, model, config_list, attention_name_groups=None, dummy_input=None, ranking_criteria='taylorfo', - global_sort=False, head_hidden_dim=None, optimizer=None, trainer=None, criterion=None, + def __init__(self, model, config_list, attention_name_groups=None, dummy_input=None, head_hidden_dim=None, + ranking_criteria='taylorfo', global_sort=False, num_iterations=1, epochs_per_iteration=1, + optimizer=None, trainer=None, criterion=None, **algo_kwargs): super().__init__(model, config_list) self.attention_name_groups = attention_name_groups self.dummy_input = dummy_input + self.head_hidden_dim = head_hidden_dim self.ranking_criteria = ranking_criteria assert self.ranking_criteria in ['l1_weight', 'l2_weight', 'l1_activation', 'l2_activation', 'taylorfo'], \ "Unsupported ranking criteria." 
self.global_sort = global_sort - self.head_hidden_dim = head_hidden_dim + self.num_iterations = num_iterations + self.epochs_per_iteration = epochs_per_iteration self._optimizer = optimizer self._trainer = trainer self._criterion = criterion + if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo'] or num_iterations > 1: + assert self._trainer is not None + assert self._optimizer is not None # Group generation: one group per attention layer, four weights per group self.masking_groups = [] @@ -83,6 +88,7 @@ def __init__(self, model, config_list, attention_name_groups=None, dummy_input=N self.set_wrappers_attribute("mask_calculated", False) self.masker = MASKER_DICT[ranking_criteria](model, self, self.head_hidden_dim, **algo_kwargs) + self.pruned_heads = {i: set() for i in range(len(self.masking_groups))} def group_weights_by_name(self): """ @@ -175,24 +181,37 @@ def validate_config(self, model, config_list): schema.validate(config_list) def compress(self): - if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo']: - training = self.bound_model.training - self.bound_model.eval() - self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) - self.update_mask() - self.bound_model.train(training) - else: - self.update_mask() - return self.bound_model + for pruning_iter in range(self.num_iterations): + self.set_wrappers_attribute("mask_calculated", False) + if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo']: + training = self.bound_model.training + self.bound_model.eval() + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) + self.update_mask() + self.bound_model.train(training) + else: + self.update_mask() + + # for iterative pruning + # finetune before next iteration + if self.num_iterations > 1: + for e in range(self.epochs_per_iteration): + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=e+1) + + # if not the last iteration, reset the maskers (may create additional hooks) + if self.num_iterations > 1 and pruning_iter != self.num_iterations - 1: + self.masker.reset() + + print('pruned heads after iteration', pruning_iter, self.pruned_heads) def update_mask(self): - masks_for_all = None + masks_for_all_groups = None if self.global_sort: - masks_for_all = self._calc_mask_global() - assert len(masks_for_all) == len(self.masking_groups) + masks_for_all_groups = self._calc_mask_global() + assert len(masks_for_all_groups) == len(self.masking_groups) for group_idx, layer_weight_group in enumerate(self.masking_groups): if self.global_sort: - masks = masks_for_all[group_idx] + masks = masks_for_all_groups[group_idx] else: masks = self._calc_mask(layer_weight_group[0], layer_weight_group) if masks is not None: @@ -205,8 +224,8 @@ def update_mask(self): def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): if not wrapper.mask_calculated: - sparsity = wrapper.config['sparsity'] - masks = self.masker.calc_mask(sparsity=sparsity, wrapper=wrapper, weight_group=weight_group, + iter_sparsity = wrapper.config['sparsity'] / self.num_iterations + masks = self.masker.calc_mask(sparsity=iter_sparsity, wrapper=wrapper, weight_group=weight_group, wrapper_idx=wrapper_idx) # masker.calc_mask returns None means calc_mask is not calculated successfully; can try later if masks is not None: @@ -218,7 +237,7 @@ def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): def _calc_mask_global(self): if 
len(self.get_modules_wrapper()) == 0: return [] - overall_sparsity = self.get_modules_wrapper()[0].config['sparsity'] + overall_sparsity = self.get_modules_wrapper()[0].config['sparsity'] / self.num_iterations n_heads_total = 0 for q_proj, _, _, _ in self.masking_groups: n_heads_total += int(q_proj.module.weight.size()[0] / self.head_hidden_dim) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 62717dabc7..dc60efc59a 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -29,6 +29,13 @@ def __init__(self, model, pruner, head_hidden_dim=None): self.head_hidden_dim = head_hidden_dim assert self.head_hidden_dim is not None, "head_hidden_dim must be specified." + def reset(self): + """ + Descendants can override this method to do preparations necessary for calculating importance scores. + This method is called during iterative pruning, before each iteration starts (except the first one). + """ + pass + def calc_mask(self, sparsity, wrapper, wrapper_idx=None, **depen_kwargs): """ calculate the mask for `wrapper`. @@ -70,13 +77,12 @@ def calc_mask_global(self, n_heads_to_prune): head_importance_scores.append([group_idx, head_idx, scores[head_idx]]) # determine which head to prune for each layer - pruning_rules = {i: set() for i in range(len(self.pruner.masking_groups))} n_selected = 0 for group_idx, head_idx, _ in sorted(head_importance_scores, key=(lambda x: x[-1])): n_heads_original = self.pruner.masking_groups[group_idx][0].module.weight.size(0) // self.head_hidden_dim - n_heads_remaining = n_heads_original - len(pruning_rules[group_idx]) - if n_heads_remaining > 1: - pruning_rules[group_idx].add(head_idx) + n_heads_remaining = n_heads_original - len(self.pruner.pruned_heads[group_idx]) + if n_heads_remaining > 1 and head_idx not in self.pruner.pruned_heads[group_idx]: + self.pruner.pruned_heads[group_idx].add(head_idx) n_selected += 1 if n_selected >= n_heads_to_prune: break @@ -84,30 +90,10 @@ def calc_mask_global(self, n_heads_to_prune): # generate masks all_masks = [] for group_idx, group in enumerate(self.pruner.masking_groups): - device = group[0].module.weight.device - n_heads = group[0].module.weight.size(0) // self.head_hidden_dim - weight_mask_shape = group[0].module.weight.data.view([n_heads, -1]).size() - bias_mask_shape = group[0].module.bias.data.view([n_heads, -1]).size() - - # a hack to avoid using torch.tensor, which has problems with pylint - head_level_mask = torch.ones(n_heads) - for i in pruning_rules[group_idx]: - head_level_mask[i] = 0 - - mask_weight = head_level_mask.unsqueeze(-1).expand(weight_mask_shape).type_as(group[0].module.weight) - mask_bias = head_level_mask.unsqueeze(-1).expand(bias_mask_shape).type_as(group[0].module.weight) - - mask_weight_proj = mask_weight.view(group[0].module.weight.size()).detach().to(device) - mask_bias_proj = mask_bias.view(-1).detach().to(device) - masks_for_proj = {'weight_mask': mask_weight_proj.detach(), 'bias_mask': mask_bias_proj} - - mask_weight_dense = mask_bias_proj.expand_as(group[-1].module.weight.data).detach().to(device) - mask_bias_dense = torch.ones_like(group[-1].module.bias.data).to(device) - masks_for_dense = {'weight_mask': mask_weight_dense.detach(), 'bias_mask': mask_bias_dense} - - masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] - + device = 
group[0].module.weight.device + head_level_mask = torch.tensor([i not in self.pruner.pruned_heads[group_idx] for i in range(n_heads)], device=device) # pylint: disable=not-callable + masks = self._get_layer_masks_from_head_mask(group, head_level_mask, has_bias=True) all_masks.append(masks) return all_masks @@ -159,6 +145,7 @@ def _get_current_state(self, sparsity, wrapper, wrapper_idx=None): num_total = weight.size(0) // self.head_hidden_dim num_prune = int(num_total * sparsity) + num_prune = max(num_prune, 1) # weight*mask_weight: apply base mask for iterative pruning return mask, weight * mask_weight, num_prune @@ -186,6 +173,30 @@ def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, **depen_k """ raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__)) + def _get_layer_masks_from_head_mask(self, weight_group, head_mask_bool, has_bias=True, device=None): + q_proj, _, _, output_proj = weight_group + if device is None: + device = q_proj.module.weight.device + + n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim + weight_mask_shape = q_proj.module.weight.data.view([n_heads, -1]).size() + bias_mask_shape = q_proj.module.bias.data.view([n_heads, -1]).size() + + mask_weight = head_mask_bool.unsqueeze(-1).expand(weight_mask_shape).type_as(q_proj.module.weight) + mask_bias = head_mask_bool.unsqueeze(-1).expand(bias_mask_shape).type_as(q_proj.module.weight) + + mask_weight_proj = mask_weight.view(q_proj.module.weight.size()).detach().to(device) + mask_bias_proj = mask_bias.view(-1).detach().to(device) if has_bias else None + masks_for_proj = {'weight_mask': mask_weight_proj.detach(), 'bias_mask': mask_bias_proj} + + mask_weight_dense = mask_bias_proj.expand_as(output_proj.module.weight.data).detach().to(device) + mask_bias_dense = torch.ones_like(output_proj.module.bias.data).to(device) + masks_for_dense = {'weight_mask': mask_weight_dense.detach(), 'bias_mask': mask_bias_dense} + + masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] + + return masks + def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): """ Calculate the mask of given layer by pruning out heads with lowest importance scores. 
@@ -209,36 +220,27 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, dict dictionary for storing masks """ - device = weight.device + if weight_group is None: + weight_group = self.pruner.masking_groups[wrapper.group_idx] importance_scores = self.get_head_importance_scores(weight_group) if importance_scores is None: return None - threshold = torch.topk(importance_scores, num_prune, largest=False)[0].max() - - # get q_proj, k_proj, v_proj, output_proj from the same attention head - q_proj, _, _, output_proj = weight_group if weight_group is not None else \ - self.pruner.masking_groups[wrapper.group_idx] - - n_heads = q_proj.module.weight.size()[0] // self.head_hidden_dim - weight_mask_shape = q_proj.module.weight.data.view([n_heads, -1]).size() - bias_mask_shape = q_proj.module.bias.data.view([n_heads, -1]).size() - - mask_weight = torch.gt(importance_scores, threshold).unsqueeze(-1).expand(weight_mask_shape).type_as(weight) - mask_bias = torch.gt(importance_scores, threshold).unsqueeze(-1).expand(bias_mask_shape).type_as(weight) - - mask_weight_proj = mask_weight.view(weight.size()).detach().to(device) - mask_bias_proj = mask_bias.view(-1).detach().to(device) \ - if base_mask['bias_mask'] is not None else None - masks_for_proj = {'weight_mask': mask_weight_proj.detach(), 'bias_mask': mask_bias_proj} - - mask_weight_dense = mask_bias_proj.expand_as(output_proj.module.weight.data).detach().to(device) - mask_bias_dense = torch.ones_like(output_proj.module.bias.data).to(device) - masks_for_dense = {'weight_mask': mask_weight_dense.detach(), 'bias_mask': mask_bias_dense} + importance_scores = [[i, importance_scores[i]] for i in range(len(importance_scores))] + head_mask_bool = torch.ones(len(importance_scores)) + n_selected = 0 + for head_idx, score in sorted(importance_scores, key=(lambda x: x[-1])): + head_mask_bool[head_idx] = 0 + if head_idx not in self.pruner.pruned_heads[weight_group[0].group_idx]: + n_selected += 1 + # update pruned_heads in pruner (mainly for iterative pruning) + self.pruner.pruned_heads[weight_group[0].group_idx].add(head_idx) + if n_selected == num_prune: + break - masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] - return masks + return self._get_layer_masks_from_head_mask(weight_group, head_mask_bool, + has_bias=True) def get_head_importance_scores(self, weight_group): """ @@ -316,6 +318,9 @@ class L1ActivationHeadMasker(AttentionHeadMasker): """ def __init__(self, model, pruner, head_hidden_dim=None): super().__init__(model, pruner, head_hidden_dim) + self.reset() + + def reset(self): self.pruner.hook_id = self._add_activation_collector(self.pruner) def get_head_importance_scores(self, weight_group): @@ -363,6 +368,9 @@ class L2ActivationHeadMasker(AttentionHeadMasker): """ def __init__(self, model, pruner, head_hidden_dim=None): super().__init__(model, pruner, head_hidden_dim) + self.reset() + + def reset(self): self.pruner.hook_id = self._add_activation_collector(self.pruner) def get_head_importance_scores(self, weight_group): @@ -414,8 +422,11 @@ class TaylorFOHeadMasker(AttentionHeadMasker): """ def __init__(self, model, pruner, head_hidden_dim=None): super().__init__(model, pruner, head_hidden_dim) - self.pruner.hook_id = self._add_activation_collector() # forward hooks for collecting activation - self.backward_hooks = {} # backward hooks for collecting gradient + self.reset() + + def reset(self): + self.pruner.hook_id = self._add_activation_collector() # forward hooks for collecting activation + 
self.backward_hooks = {} # backward hooks for collecting gradient self._add_gradient_collector() def get_head_importance_scores(self, weight_group): From 7c73fc8920944cfce0034378e2b033fc4733afa0 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 6 Jul 2021 21:03:41 +0900 Subject: [PATCH 16/63] debug --- .../pytorch/pruning/transformer_pruning_head_masker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index dc60efc59a..4aeef83c14 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -230,7 +230,7 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, importance_scores = [[i, importance_scores[i]] for i in range(len(importance_scores))] head_mask_bool = torch.ones(len(importance_scores)) n_selected = 0 - for head_idx, score in sorted(importance_scores, key=(lambda x: x[-1])): + for head_idx, _ in sorted(importance_scores, key=(lambda x: x[-1])): head_mask_bool[head_idx] = 0 if head_idx not in self.pruner.pruned_heads[weight_group[0].group_idx]: n_selected += 1 From 79186e2f8f390a2e3d8a835919a4fb9902e827be Mon Sep 17 00:00:00 2001 From: Di Wu Date: Wed, 7 Jul 2021 11:25:35 +0900 Subject: [PATCH 17/63] Simplify API; add doc strings --- .../transformers/transformer_pruning.py | 7 +- .../pytorch/pruning/transformer_pruner.py | 102 ++++++++---- .../transformer_pruning_head_masker.py | 155 ++++++------------ 3 files changed, 116 insertions(+), 148 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index c396dbce77..bdcdc06b54 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -477,10 +477,8 @@ def main(): train_dataset, eval_dataset) + # here criterion is embedded in the model. Upper levels can just pass None to trainer def trainer(model, optimizer, criterion, epoch): - # here criterion is embedded in the model. Upper levels can just pass None to trainer - # no param update performed, - # just do forward and backward on the entire train data (to collect output/gradient etc.) 
return dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=epoch) attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], @@ -488,13 +486,12 @@ def trainer(model, optimizer, criterion, epoch): ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - kwargs = {'ranking_criteria': 'l1_activation', + kwargs = {'ranking_criteria': 'l1_weight', 'global_sort': True, 'num_iterations': 2, 'epochs_per_iteration': 1, # 'attention_name_groups': attention_name_groups, 'head_hidden_dim': 64, - #'dummy_input': [torch.rand([1, 64, 768]).to(device), torch.ones([1, 64]).to(device)], # input and mask 'dummy_input': (next(iter(train_dataloader))['input_ids']).to(device), 'trainer': trainer, 'optimizer': optimizer} diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 62b6e79206..b35a79bcc1 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -38,13 +38,17 @@ class TransformerHeadPruner(Pruner): - sparsity : This is to specify the sparsity operations to be compressed to. - op_types : Optional. Operation types to prune. (Should be 'Linear' for this pruner.) - op_names : Optional. Operation names to prune. - ranking_criteria : str - Supported criteria: - - 'taylorfo' - - 'l1_weight' - - 'l2_weight' - - 'l1_activation' - - 'l2_activation' + attention_name_groups : list (Optional) + dummy_input + head_hidden_dim + ranking_criteria + global_sort + num_iterations + epochs_per_iteration + optimizer + trainer + criterion + algo_kwargs """ def __init__(self, model, config_list, attention_name_groups=None, dummy_input=None, head_hidden_dim=None, ranking_criteria='taylorfo', global_sort=False, num_iterations=1, epochs_per_iteration=1, @@ -77,7 +81,7 @@ def __init__(self, model, config_list, attention_name_groups=None, dummy_input=N assert self.dummy_input is not None logger.info("Note: weights for the same attention layer are grouped using model graph.") self._unwrap_model() - self.group_weights_by_graph() + self.group_weight_names_by_graph() self._wrap_model() # Group sanity check @@ -86,7 +90,6 @@ def __init__(self, model, config_list, attention_name_groups=None, dummy_input=N # Remove any mistakenly captured ungrouped modules self.remove_ungrouped_modules() - self.set_wrappers_attribute("mask_calculated", False) self.masker = MASKER_DICT[ranking_criteria](model, self, self.head_hidden_dim, **algo_kwargs) self.pruned_heads = {i: set() for i in range(len(self.masking_groups))} @@ -98,23 +101,27 @@ def group_weights_by_name(self): # build up masking groups name2group = {} for layer_idx, layer in enumerate(self.attention_name_groups): - errmsg = 'each name group must contain 4 weights in the following order: query projection, key ' \ - 'projection, value projection, and fully connected output layer' + errmsg = 'Each name group must contain 4 weights, with the first three corresponding to Q_proj, K_proj, ' \ + 'V_proj (in any order) and the last one being output_proj.' 
assert len(layer) == 4, errmsg self.masking_groups.append([]) for weight in layer: name2group[weight] = layer_idx - # assign wrappers to these groups + + # group wrappers for wrapper in self.get_modules_wrapper(): if wrapper.name in name2group: wrapper.group_idx = name2group[wrapper.name] self.masking_groups[name2group[wrapper.name]].append(wrapper) - print('grouping updated:', [[x.name for x in group] for group in self.masking_groups]) + logger.info('Grouping updated:') + logger.info([[x.name for x in group] for group in self.masking_groups]) - def group_weights_by_graph(self): + def group_weight_names_by_graph(self): """ - Populate self.masking_groups bu running inference on the module graph. + Populate self.attention_name_groups by running inference on the module graph. + Currently, the group inferred AttentionWeightDependency is limited to a set of four weights, with the first + three corresponding to Q_proj, K_proj, V_proj (in any order) and the last one being output_proj. """ try: module_graph = TorchModuleGraph(self.bound_model, self.dummy_input) @@ -158,7 +165,7 @@ def validate_weight_groups(self): def remove_ungrouped_modules(self): """ - Remove non-attention weights that might be captured mistakenly by a simplified config_list. + Remove non-attention weights that might be mistakenly captured by a simplified config_list. """ care_of_modules = set([x for layer in self.masking_groups for x in layer]) self.modules_wrapper = [x for x in self.modules_wrapper if x in care_of_modules] @@ -182,7 +189,6 @@ def validate_config(self, model, config_list): def compress(self): for pruning_iter in range(self.num_iterations): - self.set_wrappers_attribute("mask_calculated", False) if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo']: training = self.bound_model.training self.bound_model.eval() @@ -192,8 +198,7 @@ def compress(self): else: self.update_mask() - # for iterative pruning - # finetune before next iteration + # for iterative pruning, finetune before next iteration if self.num_iterations > 1: for e in range(self.epochs_per_iteration): self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=e+1) @@ -202,9 +207,14 @@ def compress(self): if self.num_iterations > 1 and pruning_iter != self.num_iterations - 1: self.masker.reset() - print('pruned heads after iteration', pruning_iter, self.pruned_heads) + logger.info('Pruned heads after iteration {}:'.format(pruning_iter)) + logger.info(self.pruned_heads) def update_mask(self): + """ + Calculate and update masks for each masking group. If global_sort is set, the masks for all groups are + calculated altogether, and then the groups are updated individually. 
+ """ masks_for_all_groups = None if self.global_sort: masks_for_all_groups = self._calc_mask_global() @@ -213,35 +223,59 @@ def update_mask(self): if self.global_sort: masks = masks_for_all_groups[group_idx] else: - masks = self._calc_mask(layer_weight_group[0], layer_weight_group) + masks = self._calc_mask(layer_weight_group) if masks is not None: for i, mask in enumerate(masks): for mask_type in mask: assert hasattr(layer_weight_group[i], mask_type), \ "there is no attribute '%s' in wrapper on %s" % (mask_type, layer_weight_group[i]) setattr(layer_weight_group[i], mask_type, mask[mask_type]) - print(f'mask updated: {layer_weight_group[i].name} {mask_type}') - - def _calc_mask(self, wrapper, weight_group, wrapper_idx=None): - if not wrapper.mask_calculated: - iter_sparsity = wrapper.config['sparsity'] / self.num_iterations - masks = self.masker.calc_mask(sparsity=iter_sparsity, wrapper=wrapper, weight_group=weight_group, - wrapper_idx=wrapper_idx) - # masker.calc_mask returns None means calc_mask is not calculated successfully; can try later - if masks is not None: - wrapper.mask_calculated = True - return masks - else: - return None + logger.info(f'mask updated: {layer_weight_group[i].name} {mask_type}') + + def _calc_mask(self, weight_group): + """ + Calculate mask for each group using only layer-local information. + When global_sort is set for the pruner, _calc_mask_global should be called instead of this function. + + Parameters + ---------- + weight_group : list + A list of four wrappers generated by self.group_weights_by_name(). + + Returns + ------- + masks : list + A four element list corresponding to the masks for each element in the four-element weight group. + Each element in masks is a dict with keys "weight_mask" and "bias_mask" (optional). + masks can be None if the underlying masker returns None. This means that the mask calculation fails. + The calling function can try recalculate the mask at a later time. Note that the calling function might need + to call masker.reset() before attempting to recalculate the mask. + """ + iter_sparsity = weight_group[0].config['sparsity'] / self.num_iterations + masks = self.masker.calc_mask(sparsity=iter_sparsity, weight_group=weight_group) + + return masks def _calc_mask_global(self): + """ + Calculate mask for all groups using global information. + + Returns + ------- + masks_list : list + A list corresponding to the masks for each weight group in self.masking_groups. Each element in the + returned mask_list is a four-element list corresponding to the masks for each element in a four-element + weight group. 
+ """ if len(self.get_modules_wrapper()) == 0: return [] + overall_sparsity = self.get_modules_wrapper()[0].config['sparsity'] / self.num_iterations n_heads_total = 0 for q_proj, _, _, _ in self.masking_groups: n_heads_total += int(q_proj.module.weight.size()[0] / self.head_hidden_dim) n_heads_to_prune = int(n_heads_total * overall_sparsity) + return self.masker.calc_mask_global(n_heads_to_prune) def calc_mask(self, wrapper, **kwargs): diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 4aeef83c14..ce0ca7330b 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -13,7 +13,7 @@ class AttentionHeadMasker(WeightMasker): """ - A structured pruning masker base class that prunes convolutional layer filters. + A structured pruning masker base class that prunes attention heads in attention layers. Parameters ---------- @@ -31,41 +31,41 @@ def __init__(self, model, pruner, head_hidden_dim=None): def reset(self): """ - Descendants can override this method to do preparations necessary for calculating importance scores. + Derived classes can override this method to do preparations necessary for calculating importance scores. This method is called during iterative pruning, before each iteration starts (except the first one). """ pass - def calc_mask(self, sparsity, wrapper, wrapper_idx=None, **depen_kwargs): + def calc_mask(self, sparsity, wrapper=None, wrapper_idx=None, weight_group=None, **kwargs): """ - calculate the mask for `wrapper`. + Calculate all the masks for a group of wrappers (specified in weight_group). + This function only utilizes local information for mask calculation. If global_sort is specified for the pruner, + the pruner should call calc_mask_global instead of this function. Parameters ---------- - sparsity: float/list of float - The target sparsity of the wrapper. If we calculate the mask in - the normal way, then sparsity is a float number. In contrast, if - we calculate the mask in the dependency-aware way, sparsity is a - list of float numbers, each float number corresponds to a sparsity - of a layer. + weight_group + sparsity: float + The target (amount of increase of) sparsity of the wrapper list. wrapper: PrunerModuleWrapper/list of PrunerModuleWrappers - The wrapper of the target layer. If we calculate the mask in the normal - way, then `wrapper` is an instance of PrunerModuleWrapper, else `wrapper` - is a list of PrunerModuleWrapper. + Should be None. Not used in this masker, just for consistency with the upper level API. wrapper_idx: int/list of int - The index of the wrapper. + Should be None. Not used in this masker, just for consistency with the upper level API. Returns ------- - dict - dictionary for storing masks, keys of the dict: - 'weight_mask': weight mask tensor - 'bias_mask': bias mask tensor (optional) + masks : list + masks for each element in the group. 
+ Each element in the list masks is a dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) """ - mask, weight, num_prune = self._get_current_state(sparsity, wrapper, wrapper_idx) - num_total = weight.size(0) // self.head_hidden_dim - if num_total < 2 or num_prune < 1: - return mask - return self.get_mask(mask, weight, num_prune, wrapper, wrapper_idx, **depen_kwargs) + assert weight_group is not None + num_total = weight_group[0].module.weight.data.size(0) // self.head_hidden_dim + if num_total < 2: + return None + num_prune = max(int(num_total * sparsity), 1) + + return self.get_mask(num_prune, weight_group, **kwargs) def calc_mask_global(self, n_heads_to_prune): # calculate scores as normal (this step does not require global information) @@ -93,79 +93,22 @@ def calc_mask_global(self, n_heads_to_prune): n_heads = group[0].module.weight.size(0) // self.head_hidden_dim device = group[0].module.weight.device head_level_mask = torch.tensor([i not in self.pruner.pruned_heads[group_idx] for i in range(n_heads)], device=device) # pylint: disable=not-callable - masks = self._get_layer_masks_from_head_mask(group, head_level_mask, has_bias=True) + masks = self._get_layer_masks_from_head_mask(group, head_level_mask) all_masks.append(masks) return all_masks - def _get_current_state(self, sparsity, wrapper, wrapper_idx=None): - """ - Some pruner may prune the layers in a iterative way. In each pruning iteration, - we may get the current state of this wrapper/layer, and continue to prune this layer - based on the current state. This function is to get the current pruning state of the - target wrapper/layer. - Parameters - ---------- - sparsity: float - pruning ratio, preserved weight ratio is `1 - sparsity` - wrapper: PrunerModuleWrapper - layer wrapper of this layer - wrapper_idx: int - index of this wrapper in pruner's all wrappers - Returns - ------- - base_mask: dict - dict object that stores the mask of this wrapper in this iteration, if it is the - first iteration, then we create a new mask with all ones. If there is already a - mask in this wrapper, then we return the existing mask. - weight: tensor - the current weight of this layer - num_prune: int - how many heads we should prune - """ - msg = 'module type {} is not supported!'.format(wrapper.type) - assert wrapper.type == 'Linear', msg - weight = wrapper.module.weight.data - bias = None - if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None: - bias = wrapper.module.bias.data - - if wrapper.weight_mask is None: - mask_weight = torch.ones(weight.size()).type_as(weight).detach() - else: - mask_weight = wrapper.weight_mask.clone() - if bias is not None: - if wrapper.bias_mask is None: - mask_bias = torch.ones(bias.size()).type_as(bias).detach() - else: - mask_bias = wrapper.bias_mask.clone() - else: - mask_bias = None - mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias} - - num_total = weight.size(0) // self.head_hidden_dim - num_prune = int(num_total * sparsity) - num_prune = max(num_prune, 1) - - # weight*mask_weight: apply base mask for iterative pruning - return mask, weight * mask_weight, num_prune - - def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, **depen_kwargs): + def get_mask(self, num_prune, weight_group, **kwargs): """ Calculate the mask of given layer. Parameters ---------- - base_mask: dict - The basic mask with the same shape of weight, all item in the basic mask is 1. 
- weight: tensor - the module weight to be pruned + weight_group num_prune: int Num of heads to prune wrapper: PrunerModuleWrapper layer wrapper of this layer - wrapper_idx: int - index of this wrapper in pruner's all wrappers Returns ------- dict @@ -173,7 +116,7 @@ def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, **depen_k """ raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__)) - def _get_layer_masks_from_head_mask(self, weight_group, head_mask_bool, has_bias=True, device=None): + def _get_layer_masks_from_head_mask(self, weight_group, head_mask_bool, device=None): q_proj, _, _, output_proj = weight_group if device is None: device = q_proj.module.weight.device @@ -186,18 +129,22 @@ def _get_layer_masks_from_head_mask(self, weight_group, head_mask_bool, has_bias mask_bias = head_mask_bool.unsqueeze(-1).expand(bias_mask_shape).type_as(q_proj.module.weight) mask_weight_proj = mask_weight.view(q_proj.module.weight.size()).detach().to(device) - mask_bias_proj = mask_bias.view(-1).detach().to(device) if has_bias else None - masks_for_proj = {'weight_mask': mask_weight_proj.detach(), 'bias_mask': mask_bias_proj} + mask_bias_proj = mask_bias.view(-1).detach().to(device) + masks_for_proj = {'weight_mask': mask_weight_proj.detach()} + if hasattr(q_proj.module, 'bias') and q_proj.module.bias is not None: + masks_for_proj['bias_mask'] = mask_bias_proj mask_weight_dense = mask_bias_proj.expand_as(output_proj.module.weight.data).detach().to(device) mask_bias_dense = torch.ones_like(output_proj.module.bias.data).to(device) - masks_for_dense = {'weight_mask': mask_weight_dense.detach(), 'bias_mask': mask_bias_dense} + masks_for_dense = {'weight_mask': mask_weight_dense.detach()} + if hasattr(output_proj.module, 'bias') and output_proj.module.bias is not None: + masks_for_dense['bias_mask'] = mask_bias_dense masks = [masks_for_proj, masks_for_proj, masks_for_proj, masks_for_dense] return masks - def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): + def get_mask_by_importance_ranking(self, num_prune, weight_group): """ Calculate the mask of given layer by pruning out heads with lowest importance scores. @@ -205,24 +152,15 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, ---------- weight_group: list list of a group of weights for an attention layer - base_mask: dict - The basic mask with the same shape of weight, all item in the basic mask is 1. 
- weight: tensor - the module weight to be pruned num_prune: int Num of heads to prune wrapper: PrunerModuleWrapper layer wrapper of this layer - wrapper_idx: int - index of this wrapper in pruner's all wrappers Returns ------- dict dictionary for storing masks """ - if weight_group is None: - weight_group = self.pruner.masking_groups[wrapper.group_idx] - importance_scores = self.get_head_importance_scores(weight_group) if importance_scores is None: return None @@ -239,8 +177,7 @@ def get_mask_by_importance_ranking(self, base_mask, weight, num_prune, wrapper, if n_selected == num_prune: break - return self._get_layer_masks_from_head_mask(weight_group, head_mask_bool, - has_bias=True) + return self._get_layer_masks_from_head_mask(weight_group, head_mask_bool) def get_head_importance_scores(self, weight_group): """ @@ -281,8 +218,8 @@ def get_head_importance_scores(self, weight_group): return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() - def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): - return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) class L2WeightHeadMasker(AttentionHeadMasker): @@ -305,8 +242,8 @@ def get_head_importance_scores(self, weight_group): return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() - def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): - return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) class L1ActivationHeadMasker(AttentionHeadMasker): @@ -355,8 +292,8 @@ def hook(module_, input_, output): return pruner._fwd_hook_id - def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): - return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) class L2ActivationHeadMasker(AttentionHeadMasker): @@ -407,8 +344,8 @@ def hook(module_, input_, output): return pruner._fwd_hook_id - def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): - return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) class TaylorFOHeadMasker(AttentionHeadMasker): @@ -476,5 +413,5 @@ def grad_hook(md, grad_in, grad_out): handle = output_proj.register_backward_hook(grad_hook) self.backward_hooks[output_proj.group_idx] = handle - def get_mask(self, base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group=None): - return self.get_mask_by_importance_ranking(base_mask, weight, num_prune, wrapper, wrapper_idx, weight_group) \ No newline at end of file + def get_mask(self, num_prune, weight_group, **kwargs): + return self.get_mask_by_importance_ranking(num_prune, weight_group) \ No newline at end of file From 690969a3458268e5794159cea6cf857ed6fb44f0 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Wed, 7 Jul 2021 11:35:00 +0900 Subject: [PATCH 18/63] debug --- .../compression/pytorch/pruning/transformer_pruner.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index b35a79bcc1..9dc131d10e 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -207,7 +207,7 @@ def compress(self): if self.num_iterations > 1 and pruning_iter != self.num_iterations - 1: self.masker.reset() - logger.info('Pruned heads after iteration {}:'.format(pruning_iter)) + logger.info('Pruned heads after iteration %i', pruning_iter) logger.info(self.pruned_heads) def update_mask(self): From 9d344938359f78ebc5cd5a7e65340e76282ed0fa Mon Sep 17 00:00:00 2001 From: Di Wu Date: Wed, 7 Jul 2021 12:42:22 +0900 Subject: [PATCH 19/63] docstring --- .../transformers/transformer_pruning.py | 2 +- .../pytorch/pruning/transformer_pruner.py | 64 +++++++++++++------ .../transformer_pruning_head_masker.py | 57 ++++++++++------- .../pytorch/utils/shape_dependency.py | 41 ++++-------- 4 files changed, 94 insertions(+), 70 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index bdcdc06b54..57adbc1b3b 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -486,7 +486,7 @@ def trainer(model, optimizer, criterion, epoch): ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - kwargs = {'ranking_criteria': 'l1_weight', + kwargs = {'ranking_criterion': 'taylorfo', 'global_sort': True, 'num_iterations': 2, 'epochs_per_iteration': 1, diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 9dc131d10e..1947ded33b 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -38,37 +38,61 @@ class TransformerHeadPruner(Pruner): - sparsity : This is to specify the sparsity operations to be compressed to. - op_types : Optional. Operation types to prune. (Should be 'Linear' for this pruner.) - op_names : Optional. Operation names to prune. + head_hidden_dim : int + Dimension of hidden dimesion for each attention head. (e.g., 64 for BERT) + We assume that this head_hidden_dim is constant across the entire model. attention_name_groups : list (Optional) - dummy_input - head_hidden_dim - ranking_criteria - global_sort - num_iterations - epochs_per_iteration - optimizer - trainer - criterion - algo_kwargs + List of groups of names for weights of each attention layer. Each element should be a four-element list, with + the first three corresponding to Q_proj, K_proj, V_proj (in any order) and the last one being output_proj. + dummy_input : torch.Tensor (Optional) + Input to model's forward method, used to infer module grouping if attention_name_groups is not specified. + This tensor is used by the underlying torch.jit.trace to infer the module graph. + ranking_criterion : str + The criterion for ranking attention heads. 
Currently we support: + - l1_weight: l1 norm of Q_proj, K_proj, and V_proj + - l2_weight: l2 norm of Q_proj, K_proj, and V_proj + - l1_activation: l1 norm of the output of output projection + - l2_activation: l2 norm of the output of output projection + - taylorfo: l1 norm of the output of output projection * gradient for this output + (check more details in the masker documentation) + global_sort : bool + Whether rank the heads globally or locally before deciding heads to prune. + num_iterations : int + Number of pruning iterations. Defaults to 1 (ont-shot pruning). If num_iterations > 1, the pruner will split + the sparsity specified in config_list uniformly and assign a fraction to each pruning iteration. + epochs_per_iteration : int + Number of finetuning epochs before the next pruning iteration. This only has effect when num_iterations > 1. + If num_iterations is 1, then no finetuning is performed by the pruner after pruning. + optimizer: torch.optim.Optimizer + Optimizer used to train model + trainer: function + Function used to train the model. + Users should write this function as a normal function to train the Pytorch model + and include `model, optimizer, criterion, epoch` as function arguments. + criterion: function + Function used to calculate the loss between the target and the output. + For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. """ - def __init__(self, model, config_list, attention_name_groups=None, dummy_input=None, head_hidden_dim=None, - ranking_criteria='taylorfo', global_sort=False, num_iterations=1, epochs_per_iteration=1, + def __init__(self, model, config_list,head_hidden_dim, attention_name_groups=None, dummy_input=None, + ranking_criterion='taylorfo', global_sort=False, num_iterations=1, epochs_per_iteration=1, optimizer=None, trainer=None, criterion=None, **algo_kwargs): super().__init__(model, config_list) + self.head_hidden_dim = int(head_hidden_dim) self.attention_name_groups = attention_name_groups self.dummy_input = dummy_input - self.head_hidden_dim = head_hidden_dim - self.ranking_criteria = ranking_criteria - assert self.ranking_criteria in ['l1_weight', 'l2_weight', 'l1_activation', 'l2_activation', 'taylorfo'], \ + self.ranking_criterion = ranking_criterion + assert self.ranking_criterion in ['l1_weight', 'l2_weight', 'l1_activation', 'l2_activation', 'taylorfo'], \ "Unsupported ranking criteria." 
self.global_sort = global_sort - self.num_iterations = num_iterations - self.epochs_per_iteration = epochs_per_iteration + self.num_iterations = int(num_iterations) + assert self.num_iterations >= 1, "num_iterations must be greater than or equal to 1" + self.epochs_per_iteration = int(epochs_per_iteration) self._optimizer = optimizer self._trainer = trainer self._criterion = criterion - if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo'] or num_iterations > 1: + if self.ranking_criterion in ['l1_activation', 'l2_activation', 'taylorfo'] or num_iterations > 1: assert self._trainer is not None assert self._optimizer is not None @@ -90,7 +114,7 @@ def __init__(self, model, config_list, attention_name_groups=None, dummy_input=N # Remove any mistakenly captured ungrouped modules self.remove_ungrouped_modules() - self.masker = MASKER_DICT[ranking_criteria](model, self, self.head_hidden_dim, **algo_kwargs) + self.masker = MASKER_DICT[ranking_criterion](model, self, self.head_hidden_dim, **algo_kwargs) self.pruned_heads = {i: set() for i in range(len(self.masking_groups))} def group_weights_by_name(self): @@ -189,7 +213,7 @@ def validate_config(self, model, config_list): def compress(self): for pruning_iter in range(self.num_iterations): - if self.ranking_criteria in ['l1_activation', 'l2_activation', 'taylorfo']: + if self.ranking_criterion in ['l1_activation', 'l2_activation', 'taylorfo']: training = self.bound_model.training self.bound_model.eval() self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index ce0ca7330b..baaa9f0680 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -44,13 +44,14 @@ def calc_mask(self, sparsity, wrapper=None, wrapper_idx=None, weight_group=None, Parameters ---------- - weight_group sparsity: float The target (amount of increase of) sparsity of the wrapper list. + weight_group: list + A four-element list of module wrappers wrapper: PrunerModuleWrapper/list of PrunerModuleWrappers - Should be None. Not used in this masker, just for consistency with the upper level API. + Should be None. Not used in this masker, just for consistency with the parent API. wrapper_idx: int/list of int - Should be None. Not used in this masker, just for consistency with the upper level API. + Should be None. Not used in this masker, just for consistency with the parent API. Returns ------- masks : list @@ -68,6 +69,18 @@ def calc_mask(self, sparsity, wrapper=None, wrapper_idx=None, weight_group=None, return self.get_mask(num_prune, weight_group, **kwargs) def calc_mask_global(self, n_heads_to_prune): + """ + Calculate all the masks for all groups in the pruner. + + Parameters + ---------- + n_heads_to_prune : int + Total number of attention heads to prune. + Returns + ------- + all_masks : list + A list of masks for all groups, where each element is a list of masks for each module in the group. + """ # calculate scores as normal (this step does not require global information) head_importance_scores = [] for group_idx, group in enumerate(self.pruner.masking_groups): @@ -100,19 +113,21 @@ def calc_mask_global(self, n_heads_to_prune): def get_mask(self, num_prune, weight_group, **kwargs): """ - Calculate the mask of given layer. 
+ Calculate the mask of given layer (weight_group). Parameters ---------- - weight_group num_prune: int Num of heads to prune - wrapper: PrunerModuleWrapper - layer wrapper of this layer + weight_group: list + A four-element list of module wrappers Returns ------- - dict - dictionary for storing masks + masks : list + masks for each element in the group. + Each element in the list masks is a dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) """ raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__)) @@ -150,16 +165,17 @@ def get_mask_by_importance_ranking(self, num_prune, weight_group): Parameters ---------- - weight_group: list - list of a group of weights for an attention layer num_prune: int Num of heads to prune - wrapper: PrunerModuleWrapper - layer wrapper of this layer + weight_group: list + list of a group of weights for an attention layer Returns ------- - dict - dictionary for storing masks + masks : list + masks for each element in the group. + Each element in the list masks is a dictionary for storing masks, keys of the dict: + 'weight_mask': weight mask tensor + 'bias_mask': bias mask tensor (optional) """ importance_scores = self.get_head_importance_scores(weight_group) if importance_scores is None: @@ -186,13 +202,10 @@ def get_head_importance_scores(self, weight_group): ---------- weight_group: list list of a group of weights for an attention layer - wrapper: PrunerModuleWrapper - layer wrapper of this layer - wrapper_idx: int - index of this wrapper in pruner's all wrappers + Returns ------- - tensor + importance_scores: tensor Tensor that indicates the importance of each head """ raise NotImplementedError('{} get_channel_sum is not implemented'.format(self.__class__.__name__)) @@ -354,8 +367,8 @@ class TaylorFOHeadMasker(AttentionHeadMasker): Note that this masker only relies on the output of the output layer of each attention layer. The masker collects the output the last weight (output projection) in each group and the corresponding gradient on the entire train set, and prunes the heads producing the smallest contribution as used in the following papers: - "Are Sixteen Heads Really Better than One?" (Michel et.al, 2019) - "Pruning convolutional neural networks for resource efficient inference." (Molchanov et. al., 2017) + "Are Sixteen Heads Really Better than One?" (Michel et.al, 2019) + "Pruning convolutional neural networks for resource efficient inference." (Molchanov et. al., 2017) """ def __init__(self, model, pruner, head_hidden_dim=None): super().__init__(model, pruner, head_hidden_dim) diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py index 05503a039c..8dda1506c9 100644 --- a/nni/compression/pytorch/utils/shape_dependency.py +++ b/nni/compression/pytorch/utils/shape_dependency.py @@ -597,32 +597,6 @@ def build_dependency(self): self.dependency[node.name] = layers - @property - def dependency_sets_backup(self): - """ - Get the list of the dependency set. - - Returns - ------- - dependency_sets : list - list of the dependency sets. - Each dependency set is a 4-element list of module names, with the first three elements being the projection - matrices for Q, K, V (in any order), and the last element being the dense matrix. 
- """ - d_sets = [] - for node in self.graph.nodes_py.nodes_op: - if node.op_type != 'Linear' or node.name not in self.dependency or len(self.dependency[node.name]) < 4: - continue - tmp_set = set() - for other in self.dependency[node.name]: - tmp_set.add(other) - tmp_set.remove(node.name) - res_list = list(tmp_set) - res_list.append(node.name) - d_sets.append(res_list) - - return d_sets - @property def dependency_sets(self): """ @@ -644,4 +618,17 @@ def dependency_sets(self): return d_sets def export(self, filepath): - pass + """ + Export the group dependency to a csv file. Each line describes an attention layer. + + output example: + Attention layer matmul op, Group + """ + header = ['Attention layer matmul op', 'Group'] + with open(filepath, 'w') as csvf: + csv_w = csv.writer(csvf, delimiter=',') + csv_w.writerow(header) + for name in self.dependency: + group = self.dependency[name] + if len(group) > 0: + csv_w.writerow([name, group]) From 1e2329e441e90b1435beb5022d27040f4dde70b0 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Wed, 7 Jul 2021 17:58:10 +0900 Subject: [PATCH 20/63] example v1 --- .../pruning/transformers/run.sh | 27 +- .../transformers/transformer_pruning.py | 285 +++++++++++------- .../pytorch/pruning/transformer_pruner.py | 2 +- .../transformer_pruning_head_masker.py | 24 +- 4 files changed, 206 insertions(+), 132 deletions(-) diff --git a/examples/model_compress/pruning/transformers/run.sh b/examples/model_compress/pruning/transformers/run.sh index 720ec75f66..ed8cf599a8 100755 --- a/examples/model_compress/pruning/transformers/run.sh +++ b/examples/model_compress/pruning/transformers/run.sh @@ -1,11 +1,19 @@ #!/bin/bash -# Usage: ./run.sh script gpu_id task +# Usage: ./run.sh gpu_id task -export CUDA_VISIBLE_DEVICES=$2 -SOURCE_CODE=$1 -TASK_NAME=$3 -PRETRAINED_MODEL='bert-base-uncased' # 'distilbert-base-uncased' 'roberta-base' 'bert-base-cased' 'bert-base-uncased' +export CUDA_VISIBLE_DEVICES=$1 +TASK_NAME=$2 +PRETRAINED_MODEL='bert-base-uncased' # example: 'distilbert-base-uncased' 'roberta-base' 'bert-base-cased' 'bert-base-uncased' + +# parameters for pruning +USAGE=2 # change to different numbers to run examples with different configs +SPARSITY=0.5 +RANKING_CRITERION=l1_weight +NUM_ITERATIONS=1 # 1 for one-shot pruning +EPOCHS_PER_ITERATION=1 + +# other training parameters, no need to change MAX_LENGTH=128 BATCH_SIZE=32 LR=2e-5 @@ -13,12 +21,17 @@ N_EPOCHS=3 SEED=2021 time=$(date "+%Y%m%d%H%M%S") -OUTDIR="models_${PRETRAINED_MODEL}_${SOURCE_CODE}_${TASK_NAME}_$time/" +OUTDIR="models_${PRETRAINED_MODEL}_${TASK_NAME}_$time/" TASK_LIST=('cola' 'sst2' 'mrpc' 'stsb' 'qqp' 'mnli' 'qnli' 'rte' 'wnli') if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then mkdir $OUTDIR - python $SOURCE_CODE \ + python transformer_pruning.py \ + --sparsity $SPARSITY \ + --ranking_criterion $RANKING_CRITERION \ + --num_iterations $NUM_ITERATIONS \ + --epochs_per_iteration $EPOCHS_PER_ITERATION \ + --speed_up \ --seed $SEED \ --model_name_or_path $PRETRAINED_MODEL \ --task_name $TASK_NAME \ diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 57adbc1b3b..860d8da533 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -30,11 +30,11 @@ import nni from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params from 
nni.algorithms.compression.pytorch.pruning import ( TransformerHeadPruner ) - logger = logging.getLogger('bert_pruning_example') task_to_keys = { @@ -52,88 +52,67 @@ def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") - parser.add_argument( - "--task_name", - type=str, - default=None, - help="The name of the glue task to train on.", - choices=list(task_to_keys.keys()), - ) - parser.add_argument( - "--train_file", type=str, default=None, help="A csv or a json file containing the training data." - ) - parser.add_argument( - "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." - ) - parser.add_argument( - "--max_length", - type=int, - default=128, - help=( - "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," - " sequences shorter will be padded if `--pad_to_max_lengh` is passed." - ), - ) - parser.add_argument( - "--pad_to_max_length", - action="store_true", - help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", - ) - parser.add_argument( - "--model_name_or_path", - type=str, - help="Path to pretrained model or model identifier from huggingface.co/models.", - required=True, - ) - parser.add_argument( - "--use_slow_tokenizer", - action="store_true", - help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", - ) - parser.add_argument( - "--per_device_train_batch_size", - type=int, - default=8, - help="Batch size (per device) for the training dataloader.", - ) - parser.add_argument( - "--per_device_eval_batch_size", - type=int, - default=8, - help="Batch size (per device) for the evaluation dataloader.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-5, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") - parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--lr_scheduler_type", - type=SchedulerType, - default="linear", - help="The scheduler type to use.", - choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], - ) - parser.add_argument( - "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
-    )
-    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
-    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
+
+    parser.add_argument("--model_name_or_path", type=str, required=True,
+                        help="Path to pretrained model or model identifier from huggingface.co/models.")
+    parser.add_argument("--task_name", type=str, default=None,
+                        help="The name of the glue task to train on.",
+                        choices=list(task_to_keys.keys()))
+    parser.add_argument("--output_dir", type=str, default=None,
+                        help="Where to store the final model.")
+    parser.add_argument('--usage', type=int, default=1,
+                        help='Select which config example to run')
+    parser.add_argument('--sparsity', type=float, required=True,
+                        help='Sparsity - proportion of heads to prune (should be between 0 and 1)')
+    parser.add_argument('--global_sort', action='store_true', default=False,
+                        help='Rank the heads globally and prune the heads with lowest scores. If set to False, the '
+                             'heads are only ranked within one layer')
+    parser.add_argument("--ranking_criterion", type=str, default='l1_weight',
+                        choices=["l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo"],
+                        help="The criterion used to rank attention heads before pruning.")
+    parser.add_argument("--num_iterations", type=int, default=1,
+                        help="Number of pruning iterations (1 for one-shot pruning).")
+    parser.add_argument("--epochs_per_iteration", type=int, default=1,
+                        help="Epochs to finetune before the next pruning iteration "
+                             "(only effective if num_iterations > 1).")
+    parser.add_argument('--speed_up', action='store_true', default=False,
+                        help='Whether to speed up the pruned model')
+
+    # parameters for model training; the defaults are fine for running the examples
+    parser.add_argument("--train_file", type=str, default=None,
+                        help="A csv or a json file containing the training data.")
+    parser.add_argument("--validation_file", type=str, default=None,
+                        help="A csv or a json file containing the validation data.")
+    parser.add_argument("--max_length", type=int, default=128,
+                        help=("The maximum total input sequence length after tokenization. Sequences longer than this "
+                              "will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed."))
+    parser.add_argument("--pad_to_max_length", action="store_true",
+                        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.")
+    parser.add_argument("--use_slow_tokenizer", action="store_true",
+                        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).")
+    parser.add_argument("--per_device_train_batch_size", type=int, default=8,
+                        help="Batch size (per device) for the training dataloader.")
+    parser.add_argument("--per_device_eval_batch_size", type=int, default=8,
+                        help="Batch size (per device) for the evaluation dataloader.")
+    parser.add_argument("--learning_rate", type=float, default=5e-5,
+                        help="Initial learning rate (after the potential warmup period) to use.")
+    parser.add_argument("--weight_decay", type=float, default=0.0,
+                        help="Weight decay to use.")
+    parser.add_argument("--num_train_epochs", type=int, default=3,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_train_steps", type=int, default=None,
+                        help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.") + parser.add_argument("--gradient_accumulation_steps", type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--lr_scheduler_type", type=SchedulerType, default="linear", + help="The scheduler type to use.", + choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", + "constant_with_warmup"]) + parser.add_argument("--num_warmup_steps", type=int, default=0, + help="Number of steps for the warmup in the lr scheduler.") + parser.add_argument("--seed", type=int, default=None, + help="A seed for reproducible training.") + args = parser.parse_args() # Sanity checks @@ -310,7 +289,6 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=None): - # no param update performed, just do forward and backward on the entire train data (to collect output/gradient etc.) if epoch_num == 0: print("Running forward and backward on the entire dataset without updating parameters...") else: @@ -394,7 +372,6 @@ def main(): ######################################################################### # Prepare model, tokenizer, dataset, optimizer, and the scheduler - # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -423,16 +400,13 @@ def main(): processed_datasets = preprocess_dataset(args, tokenizer, model, raw_datasets, num_labels, is_regression, label_list) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"] - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") ######################################################################### # Finetune before pruning model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, - model, - train_dataset, - eval_dataset) + model, + train_dataset, + eval_dataset) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) @@ -465,7 +439,7 @@ def main(): # train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) # # if args.output_dir is not None: - # torch.save(model, args.output_dir + '/entire_model_before_pruning.pt') + # torch.save(model.state_dict(), args.output_dir + '/model_before_pruning.pt') # # if args.task_name == "mnli": # final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) @@ -476,39 +450,119 @@ def main(): model, train_dataset, eval_dataset) + dummy_input = next(iter(train_dataloader))['input_ids'].to(device) + flops, params, results = count_flops_params(model, dummy_input) + print(f'Initial model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M') # here criterion is embedded in the model. 
Upper levels can just pass None to trainer
     def trainer(model, optimizer, criterion, epoch):
         return dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=epoch)
 
-    attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
-                                     ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
-                                     ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
-                                     ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
-
-    kwargs = {'ranking_criterion': 'taylorfo',
-              'global_sort': True,
-              'num_iterations': 2,
-              'epochs_per_iteration': 1,
-              # 'attention_name_groups': attention_name_groups,
-              'head_hidden_dim': 64,
-              'dummy_input': (next(iter(train_dataloader))['input_ids']).to(device),
-              'trainer': trainer,
-              'optimizer': optimizer}
-
-    config_list = [{
-        'sparsity': 0.5,
-        'op_types': ["Linear"],
-        # 'op_names': [x for layer in attention_name_groups for x in layer]
-    }]
+    # example 1: prune all layers with uniform sparsity
+    if args.usage == 1:
+        kwargs = {'ranking_criterion': args.ranking_criterion,
+                  'global_sort': args.global_sort,
+                  'num_iterations': args.num_iterations,
+                  'epochs_per_iteration': args.epochs_per_iteration,
+                  'head_hidden_dim': 64,
+                  'dummy_input': dummy_input,
+                  'trainer': trainer,
+                  'optimizer': optimizer}
+
+        config_list = [{
+            'sparsity': args.sparsity,
+            'op_types': ["Linear"],
+        }]
+
+    # example 2: prune all layers with uniform sparsity, but specify name groups instead of dummy_input
+    elif args.usage == 2:
+        attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
+                                         ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
+                                         ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
+                                         ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
+
+        kwargs = {'ranking_criterion': args.ranking_criterion,
+                  'global_sort': args.global_sort,
+                  'num_iterations': args.num_iterations,
+                  'epochs_per_iteration': args.epochs_per_iteration,
+                  'attention_name_groups': attention_name_groups,
+                  'head_hidden_dim': 64,
+                  'trainer': trainer,
+                  'optimizer': optimizer}
+
+        config_list = [{
+            'sparsity': args.sparsity,
+            'op_types': ["Linear"],
+            'op_names': [x for layer in attention_name_groups for x in layer]
+        }
+        ]
+
+    # example 3: prune different layers with different sparsity
+    elif args.usage == 3:
+        attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
+                                         ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
+                                         ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
+                                         ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
+
+        kwargs = {'ranking_criterion': args.ranking_criterion,
+                  'global_sort': args.global_sort,
+                  'num_iterations': args.num_iterations,
+                  'epochs_per_iteration': args.epochs_per_iteration,
+                  'attention_name_groups': attention_name_groups,
+                  'head_hidden_dim': 64,
+                  'trainer': trainer,
+                  'optimizer': optimizer}
+
+        config_list = [{
+            'sparsity': args.sparsity,
+            'op_types': ["Linear"],
+            'op_names': [x for layer in attention_name_groups[:6] for x in layer]   # first six layers
+        },
+        {
+            'sparsity': args.sparsity / 2,
+            'op_types': ["Linear"],
+            'op_names': [x for layer in attention_name_groups[6:] for x in layer]   # last six layers
+        }
+        ]
+
+    else:
+        raise RuntimeError("Wrong usage number")
 
     pruner = TransformerHeadPruner(model, config_list, **kwargs)
     pruner.compress()
-    exit()
+    # uncomment the following part to 
export the pruned model masks + # model_path = os.path.join(args.output_dir, 'pruned_{}_{}.pth'.format(args.model_name_or_path, args.task_name)) + # mask_path = os.path.join(args.output_dir, 'mask_{}_{}.pth'.format(args.model_name_or_path, args.task_name)) + # pruner.export_model(model_path=model_path, mask_path=mask_path) + + # Currently, speeding up the Transformers through NNI is not supported because of shape inference issues. + # However, if you are using the transformers library, you can use the following workaround: + if args.speed_up: + speedup_rules = {} + for group_idx, group in enumerate(pruner.attention_name_groups): + # get the layer index + layer_idx = None + for part in group[0].split('.'): + try: + layer_idx = int(part) + break + except: + continue + if layer_idx is not None: + speedup_rules[layer_idx] = pruner.pruned_heads[group_idx] + pruner._unwrap_model() + model.bert._prune_heads(speedup_rules) + print(model) ######################################################################### # After pruning, finetune again on the target task + # Get the metric function + if args.task_name is not None: + metric = load_metric("glue", args.task_name) + else: + metric = load_metric("accuracy") + # re-initialize the optimizer and the scheduler model, optimizer, _, _, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset) @@ -523,11 +577,14 @@ def trainer(model, optimizer, criterion, epoch): train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) if args.output_dir is not None: - torch.save(model, args.output_dir + '/entire_model_after_pruning.pt') + torch.save(model.state_dict(), args.output_dir + '/model_after_pruning.pt') if args.task_name == "mnli": final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) + flops, params, results = count_flops_params(model, dummy_input) + print(f'Final model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M') + if __name__ == "__main__": main() diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 1947ded33b..a4cf0982a6 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -254,7 +254,7 @@ def update_mask(self): assert hasattr(layer_weight_group[i], mask_type), \ "there is no attribute '%s' in wrapper on %s" % (mask_type, layer_weight_group[i]) setattr(layer_weight_group[i], mask_type, mask[mask_type]) - logger.info(f'mask updated: {layer_weight_group[i].name} {mask_type}') + logger.debug(f'mask updated: {layer_weight_group[i].name} {mask_type}') def _calc_mask(self, weight_group): """ diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index baaa9f0680..9e2c992bd1 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -289,7 +289,9 @@ def get_head_importance_scores(self, weight_group): def _add_activation_collector(self, pruner): def collector(collected_activation): def hook(module_, input_, output): - raw_activation = torch.abs(output.detach().cpu()) # L1-norm + if type(input_) is tuple: + input_ = input_[0] + raw_activation = torch.abs(input_.detach().cpu()) # L1-norm raw_activation_reduced 
= torch.sum(raw_activation, [0, 1]) collected_activation.append(raw_activation_reduced) return hook @@ -339,7 +341,9 @@ def get_head_importance_scores(self, weight_group): def _add_activation_collector(self, pruner): def collector(collected_activation): def hook(module_, input_, output): - raw_activation = torch.abs(output.detach().cpu() ** 2) # L2-norm + if type(input_) is tuple: + input_ = input_[0] + raw_activation = torch.abs(input_.detach().cpu() ** 2) # L2-norm raw_activation_reduced = torch.sum(raw_activation, [0, 1]) collected_activation.append(raw_activation_reduced) @@ -394,10 +398,10 @@ def get_head_importance_scores(self, weight_group): def _add_activation_collector(self): def forward_hook(md, inp, out): - if type(out) is tuple: - out = out[0] - n_heads_per_layer = out.size(-1) // self.head_hidden_dim - heads_output = out.view([out.size(0), out.size(1), n_heads_per_layer, -1]).detach() + if type(inp) is tuple: + inp = inp[0] + n_heads_per_layer = inp.size(-1) // self.head_hidden_dim + heads_output = inp.view([inp.size(0), inp.size(1), n_heads_per_layer, -1]).detach() md.forward_output_cached = heads_output self.pruner._fwd_hook_id += 1 @@ -411,10 +415,10 @@ def forward_hook(md, inp, out): def _add_gradient_collector(self): def grad_hook(md, grad_in, grad_out): - if type(grad_out) is tuple: - grad_out = grad_out[0] - n_heads_per_layer = grad_out.size(-1) // self.head_hidden_dim - heads_grad = grad_out.view([grad_out.size(0), grad_out.size(1), n_heads_per_layer, -1]) + if type(grad_in) is tuple: + grad_in = grad_in[0] + n_heads_per_layer = grad_in.size(-1) // self.head_hidden_dim + heads_grad = grad_in.view([grad_in.size(0), grad_in.size(1), n_heads_per_layer, -1]) heads_scores = torch.abs(heads_grad * md.forward_output_cached) heads_scores = torch.sum(heads_scores, [0, 1, 3]).detach().cpu() if hasattr(md, 'head_importance_scores'): From 94e48044d56d4983ee2916fcd7dc76a61450a5bf Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 9 Jul 2021 15:38:54 +0900 Subject: [PATCH 21/63] doc skeleton --- .../Compression/CompressionReference.rst | 2 + docs/en_US/Compression/Overview.rst | 2 + docs/en_US/Compression/Pruner.rst | 37 +++++++++++++++++++ 3 files changed, 41 insertions(+) diff --git a/docs/en_US/Compression/CompressionReference.rst b/docs/en_US/Compression/CompressionReference.rst index 50dcc12876..5d3f6aa5cd 100644 --- a/docs/en_US/Compression/CompressionReference.rst +++ b/docs/en_US/Compression/CompressionReference.rst @@ -91,6 +91,8 @@ Pruners .. autoclass:: nni.algorithms.compression.pytorch.pruning.lottery_ticket.LotteryTicketPruner :members: +.. 
autoclass:: nni.algorithms.compression.pytorch.pruning.TransformerHeadPruner + :members: Quantizers ^^^^^^^^^^ diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst index 788aa0ac84..9985786cba 100644 --- a/docs/en_US/Compression/Overview.rst +++ b/docs/en_US/Compression/Overview.rst @@ -73,6 +73,8 @@ Pruning algorithms compress the original network by removing redundant weights o - Automatic pruning by iteratively call SimulatedAnnealing Pruner and ADMM Pruner `Reference Paper `__ * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__ + * - `Transformer Head Pruner <../Compression/Pruner.rst#transformerhead-pruner>`__ + - Pruning attention heads of transformer models by a range of ranking metrics You can refer to this `benchmark <../CommunitySharings/ModelCompressionComparison.rst>`__ for the performance of these pruners on some benchmark problems. diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 56259742fc..8c02d6a47d 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -28,6 +28,7 @@ We provide several pruning algorithms that support fine-grained weight pruning a **Others** * `Lottery Ticket Hypothesis <#lottery-ticket-hypothesis>`__ +* `Transformer Head Pruner<#transformerhead-pruner>`__ Level Pruner ------------ @@ -722,3 +723,39 @@ User configuration for Sensitivity Pruner **PyTorch** .. autoclass:: nni.algorithms.compression.pytorch.pruning.SensitivityPruner + +Transformer Head Pruner +------------------ + +Test test test test. + +.. code-block:: bash + + 1. Analyze the sensitivity of each layer in the current state of the model. + 2. Prune each layer according to the sensitivity. + + +For more details, please refer to `Learning both Weights and Connections for Efficient Neural Networks `__. + +Usage +^^^^^ + +PyTorch code + +.. code-block:: python + + from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner + config_list = [{ + 'sparsity': 0.5, + 'op_types': ['Linear'] + }] + pruner = TransformerHeadPruner(model, config_list, finetuner=fine_tuner, evaluator=evaluator) + # eval_args and finetune_args are the parameters passed to the evaluator and finetuner respectively + pruner.compress(eval_args=[model], finetune_args=[model]) + +User configuration for Sensitivity Pruner +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**PyTorch** + +.. autoclass:: nni.algorithms.compression.pytorch.pruning.TransformerHeadPruner From a5a92d93fd6092bdecfa7274d176135f17042a96 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 9 Jul 2021 15:53:22 +0900 Subject: [PATCH 22/63] doc update --- docs/en_US/Compression/Overview.rst | 5 ++--- docs/en_US/Compression/Pruner.rst | 10 ++++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst index 9985786cba..ee1f548215 100644 --- a/docs/en_US/Compression/Overview.rst +++ b/docs/en_US/Compression/Overview.rst @@ -35,7 +35,7 @@ The algorithms include pruning algorithms and quantization algorithms. Pruning Algorithms ^^^^^^^^^^^^^^^^^^ -Pruning algorithms compress the original network by removing redundant weights or channels of layers, which can reduce model complexity and mitigate the over-fitting issue. 
+Pruning algorithms compress the original network by removing redundant weights or channels of layers, which can reduce model complexity and mitigate the over-fitting issue. .. list-table:: :header-rows: 1 @@ -73,10 +73,9 @@ Pruning algorithms compress the original network by removing redundant weights o - Automatic pruning by iteratively call SimulatedAnnealing Pruner and ADMM Pruner `Reference Paper `__ * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__ - * - `Transformer Head Pruner <../Compression/Pruner.rst#transformerhead-pruner>`__ + * - `Transformer Head Pruner <../Compression/Pruner.rst#transformerhead-pruner>`__ - Pruning attention heads of transformer models by a range of ranking metrics - You can refer to this `benchmark <../CommunitySharings/ModelCompressionComparison.rst>`__ for the performance of these pruners on some benchmark problems. Quantization Algorithms diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 8c02d6a47d..5c6e1ef2b6 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -725,9 +725,11 @@ User configuration for Sensitivity Pruner .. autoclass:: nni.algorithms.compression.pytorch.pruning.SensitivityPruner Transformer Head Pruner ------------------- +----------------------- + +Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__. +Typically, in each attention layer in Transformer models -Test test test test. .. code-block:: bash @@ -753,8 +755,8 @@ PyTorch code # eval_args and finetune_args are the parameters passed to the evaluator and finetuner respectively pruner.compress(eval_args=[model], finetune_args=[model]) -User configuration for Sensitivity Pruner -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +User configuration for Transformer Head Pruner +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ **PyTorch** From 5399dd8d6536b0e9fc962098d47dc0904e34f77c Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 9 Jul 2021 16:08:33 +0900 Subject: [PATCH 23/63] doc update --- docs/en_US/Compression/CompressionReference.rst | 2 +- docs/en_US/Compression/Pruner.rst | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/en_US/Compression/CompressionReference.rst b/docs/en_US/Compression/CompressionReference.rst index 5d3f6aa5cd..efb23248c1 100644 --- a/docs/en_US/Compression/CompressionReference.rst +++ b/docs/en_US/Compression/CompressionReference.rst @@ -91,7 +91,7 @@ Pruners .. autoclass:: nni.algorithms.compression.pytorch.pruning.lottery_ticket.LotteryTicketPruner :members: -.. autoclass:: nni.algorithms.compression.pytorch.pruning.TransformerHeadPruner +.. autoclass:: nni.algorithms.compression.pytorch.pruning.transformer_pruner.TransformerHeadPruner :members: Quantizers diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 5c6e1ef2b6..a2528b4597 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -728,7 +728,8 @@ Transformer Head Pruner ----------------------- Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__. -Typically, in each attention layer in Transformer models +Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. 
The outputs of the former three matrices contains the projected results for all heads. Normall, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper or this visualization [change here!!!!] for more details. +Therefore, when an attention head is pruned, the same dimension .. code-block:: bash @@ -760,4 +761,4 @@ User configuration for Transformer Head Pruner **PyTorch** -.. autoclass:: nni.algorithms.compression.pytorch.pruning.TransformerHeadPruner +.. autoclass:: nni.algorithms.compression.pytorch.pruning.transformer_pruner.TransformerHeadPruner From ec5cdf2794986a846cc40ae234c8530a540af0f0 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 9 Jul 2021 16:32:52 +0900 Subject: [PATCH 24/63] doc update --- docs/en_US/Compression/Pruner.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index a2528b4597..ce43edd05e 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -729,7 +729,9 @@ Transformer Head Pruner Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__. Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contains the projected results for all heads. Normall, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper or this visualization [change here!!!!] for more details. -Therefore, when an attention head is pruned, the same dimension + + +Therefore, when an attention head is pruned, the same weights corresponding to that heads in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. In our implementation, we calculate and apply masks to the four matrices together. .. code-block:: bash @@ -761,4 +763,4 @@ User configuration for Transformer Head Pruner **PyTorch** -.. autoclass:: nni.algorithms.compression.pytorch.pruning.transformer_pruner.TransformerHeadPruner +.. autoclass:: nni.algorithms.compression.pytorch.pruning.TransformerHeadPruner From f747a9abd0f61fb1ee7d0e2d0ef33f2272bf3a64 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 9 Jul 2021 17:10:33 +0900 Subject: [PATCH 25/63] doc update --- docs/en_US/Compression/Pruner.rst | 104 ++++++++++++++++-- .../pytorch/pruning/transformer_pruner.py | 8 +- 2 files changed, 96 insertions(+), 16 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index ce43edd05e..a6cafb0fb0 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -730,33 +730,113 @@ Transformer Head Pruner Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__. Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contains the projected results for all heads. 
Normall, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper or this visualization [change here!!!!] for more details. - Therefore, when an attention head is pruned, the same weights corresponding to that heads in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. In our implementation, we calculate and apply masks to the four matrices together. +The pruner implements the following algorithm: .. code-block:: bash - 1. Analyze the sensitivity of each layer in the current state of the model. - 2. Prune each layer according to the sensitivity. + Repeat for each pruning iteration (only 1 for one-shot pruning): + 1. Calculate importance scores for each head in each specified layer (with different criterion options) + 2. Sort heads locally or globally, and prune out some heads with lowest scores. The number of pruned heads is determined according to the sparsity parameter. + 3. If the specified pruning iteration is larger than 1 (iterative pruning), finetune the model for a while before the next pruning iteration. +Currently, the following head sorting criteria are supported: -For more details, please refer to `Learning both Weights and Connections for Efficient Neural Networks `__. + * "l1_weight": rank heads by the L1-norm of their query, key, and value projection matrices. + * "l2_weight": rank heads by the L2-norm of their query, key, and value projection matrices. + * "l1_activation": rank heads by the L1-norm of their attention computation output. + * "l2_activation": rank heads by the L2-norm of their attention computation output. + * "taylorfo": rank heads by l1 norm of the output of attention computation * gradient for this output. Check more details in `this paper `__ and `this one `__. + +We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the `global_sort` parameter. Note that if `global_sort=True` is passed, all weights must have the same sparsity in the config list. + +In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a 2-d list containing the names of these modules (usage 1 below) to the pruner, or simply pass a dummy input and the pruner will run `torch.jit.trace` to group the weights (usage 2 below). + +However, if you would like to assign different sparsity to each layer, currently you could only use the first option, i.e., passing names of the weights to the pruner (usage 3 below). + +In addition to the following usage guide, we provide a more detailed example of pruning BERT and running it on tasks from the GLUE benchmark. Please find it in this :githublink:`example `. Usage ^^^^^ -PyTorch code +Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code) .. 
code-block:: python
 
     from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner
-    config_list = [{
-        'sparsity': 0.5,
-        'op_types': ['Linear']
-    }]
-    pruner = TransformerHeadPruner(model, config_list, finetuner=fine_tuner, evaluator=evaluator)
-    # eval_args and finetune_args are the parameters passed to the evaluator and finetuner respectively
-    pruner.compress(eval_args=[model], finetune_args=[model])
+    kwargs = {'ranking_criterion': "l1_weight",
+              'global_sort': False,
+              'num_iterations': 1,
+              'epochs_per_iteration': 1,  # this is ignored when num_iterations = 1
+              'head_hidden_dim': 64,
+              'dummy_input': dummy_input,
+              'trainer': trainer,
+              'optimizer': optimizer
+              }
+    config_list = [{
+        'sparsity': 0.5,
+        'op_types': ["Linear"]
+    }]
+    pruner = TransformerHeadPruner(model, config_list, **kwargs)
+    pruner.compress()
+
+Usage 2: one-shot pruning, passing names to the pruner instead of dummy input (PyTorch code)
+
+.. code-block:: python
+
+    from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner
+    attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
+    kwargs = {'ranking_criterion': "l1_weight",
+              'global_sort': False,
+              'num_iterations': 1,
+              'epochs_per_iteration': 1,  # this is ignored when num_iterations = 1
+              'head_hidden_dim': 64,
+              'attention_name_groups': attention_name_groups,
+              'trainer': trainer,
+              'optimizer': optimizer
+              }
+    config_list = [{
+        'sparsity': 0.5,
+        'op_types': ["Linear"]
+    }]
+    pruner = TransformerHeadPruner(model, config_list, **kwargs)
+    pruner.compress()
+
+Usage 3: one-shot pruning, different sparsity for different layer (PyTorch code)
+
+.. code-block:: python
+
+    from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner
+    attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
+                                     ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))
+    kwargs = {'ranking_criterion': "l1_weight",
+              'global_sort': False,
+              'num_iterations': 1,
+              'epochs_per_iteration': 1,  # this is ignored when num_iterations = 1
+              'head_hidden_dim': 64,
+              'attention_name_groups': attention_name_groups,  # can change to dummy_input here
+              'trainer': trainer,
+              'optimizer': optimizer
+              }
+    config_list = [{
+        'sparsity': 0.5,
+        'op_types': ["Linear"],
+        'op_names': [x for layer in attention_name_groups[:6] for x in layer]   # first six layers
+    },
+    {
+        'sparsity': 0.25,
+        'op_types': ["Linear"],
+        'op_names': [x for layer in attention_name_groups[6:] for x in layer]   # last six layers
+    }
+    ]
+    pruner = TransformerHeadPruner(model, config_list, **kwargs)
+    pruner.compress()
 
 User configuration for Transformer Head Pruner
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
index a4cf0982a6..c25c8c3189 100644
--- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
@@ -39,7 +39,7 @@ class TransformerHeadPruner(Pruner):
         - op_types : Optional. 
Operation types to prune. (Should be 'Linear' for this pruner.) - op_names : Optional. Operation names to prune. head_hidden_dim : int - Dimension of hidden dimesion for each attention head. (e.g., 64 for BERT) + Dimension of the hidden dimension of each attention head. (e.g., 64 for BERT) We assume that this head_hidden_dim is constant across the entire model. attention_name_groups : list (Optional) List of groups of names for weights of each attention layer. Each element should be a four-element list, with @@ -51,9 +51,9 @@ class TransformerHeadPruner(Pruner): The criterion for ranking attention heads. Currently we support: - l1_weight: l1 norm of Q_proj, K_proj, and V_proj - l2_weight: l2 norm of Q_proj, K_proj, and V_proj - - l1_activation: l1 norm of the output of output projection - - l2_activation: l2 norm of the output of output projection - - taylorfo: l1 norm of the output of output projection * gradient for this output + - l1_activation: l1 norm of the output of attention computation + - l2_activation: l2 norm of the output of attention computation + - taylorfo: l1 norm of the output of attention computation * gradient for this output (check more details in the masker documentation) global_sort : bool Whether rank the heads globally or locally before deciding heads to prune. From f70343c9a10f48f9de8f69119bebd985e9776b4b Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 9 Jul 2021 17:15:59 +0900 Subject: [PATCH 26/63] update --- docs/en_US/Compression/Pruner.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index a6cafb0fb0..12bb404181 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -728,7 +728,7 @@ Transformer Head Pruner ----------------------- Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__. -Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contains the projected results for all heads. Normall, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper or this visualization [change here!!!!] for more details. +Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contains the projected results for all heads. Normall, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper for more details. Therefore, when an attention head is pruned, the same weights corresponding to that heads in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. In our implementation, we calculate and apply masks to the four matrices together. 
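The head-masking scheme described in the documentation above can be made concrete with a short sketch. The snippet below is illustrative only and not part of the patch series: it assumes BERT-base shapes (12 heads with hidden dimension 64 each) and the (out_features, in_features) weight layout of torch.nn.Linear, and the helper name build_head_masks is invented for this example.

.. code-block:: python

    import torch

    def build_head_masks(pruned_heads, num_heads=12, head_dim=64):
        # Illustrative sketch: each head owns a contiguous block of `head_dim`
        # output rows in the Q/K/V projection weights.
        hidden = num_heads * head_dim
        keep = torch.ones(num_heads)
        keep[list(pruned_heads)] = 0.
        row_flags = keep.repeat_interleave(head_dim)  # shape: (hidden,)
        # Q/K/V projections: mask whole output rows (and the matching bias entries).
        qkv_weight_mask = row_flags.unsqueeze(1).expand(hidden, hidden)
        qkv_bias_mask = row_flags
        # Output projection: its *input* is the concatenation of head outputs,
        # so the same flags mask its columns instead of its rows.
        output_weight_mask = row_flags.unsqueeze(0).expand(hidden, hidden)
        return qkv_weight_mask, qkv_bias_mask, output_weight_mask

    q_mask, q_bias_mask, out_mask = build_head_masks({0, 5})  # drop heads 0 and 5

Applying such masks to all four matrices of a layer at once is what keeps the query, key, value, and output projections consistent with each other after a head is removed.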
From e6b6d84b9863b0907a12bffb6c11d5876dff93ff Mon Sep 17 00:00:00 2001 From: Di Wu Date: Sun, 11 Jul 2021 14:25:23 +0900 Subject: [PATCH 27/63] doc debug --- docs/en_US/Compression/Pruner.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 12bb404181..44156d6970 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -28,7 +28,7 @@ We provide several pruning algorithms that support fine-grained weight pruning a **Others** * `Lottery Ticket Hypothesis <#lottery-ticket-hypothesis>`__ -* `Transformer Head Pruner<#transformerhead-pruner>`__ +* `Transformer Head Pruner <#transformerhead-pruner>`__ Level Pruner ------------ From 5aa63d2141fca2e8fb2431342f45ddb43e800d62 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Sun, 11 Jul 2021 14:36:42 +0900 Subject: [PATCH 28/63] update examples --- docs/en_US/Compression/Overview.rst | 2 +- .../transformers/transformer_pruning.py | 40 +++++++++++-------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst index ee1f548215..ba04df4d54 100644 --- a/docs/en_US/Compression/Overview.rst +++ b/docs/en_US/Compression/Overview.rst @@ -74,7 +74,7 @@ Pruning algorithms compress the original network by removing redundant weights o * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__ * - `Transformer Head Pruner <../Compression/Pruner.rst#transformerhead-pruner>`__ - - Pruning attention heads of transformer models by a range of ranking metrics + - Pruning attention heads of transformer models by a range of ranking metrics. You can refer to this `benchmark <../CommunitySharings/ModelCompressionComparison.rst>`__ for the performance of these pruners on some benchmark problems. diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 860d8da533..9475d3b9c4 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -402,7 +402,7 @@ def main(): eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"] ######################################################################### - # Finetune before pruning + # Finetune on the target GLUE task before pruning model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, @@ -428,21 +428,21 @@ def main(): else: metric = load_metric("accuracy") - # total_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps - # logger.info("***** Finetuning before pruning *****") - # logger.info(f" Num examples = {len(train_dataset)}") - # logger.info(f" Num Epochs = {args.num_train_epochs}") - # logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - # logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") - # logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - # logger.info(f" Total optimization steps = {args.max_train_steps}") - # train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) - # - # if args.output_dir is not None: - # torch.save(model.state_dict(), args.output_dir + '/model_before_pruning.pt') - # - # if args.task_name == "mnli": - # final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) + total_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps + logger.info("***** Finetuning before pruning *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) + + if args.output_dir is not None: + torch.save(model.state_dict(), args.output_dir + '/model_before_pruning.pt') + + if args.task_name == "mnli": + final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) ######################################################################### # Pruning @@ -458,6 +458,7 @@ def main(): def trainer(model, optimizer, criterion, epoch): return dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=epoch) + # We provide three usages, set the "usage" parameter in the command line argument to run one of them. # example 1: prune all layers with uniform sparsity if args.usage == 1: kwargs = {'ranking_criterion': args.ranking_criterion, @@ -531,13 +532,18 @@ def trainer(model, optimizer, criterion, epoch): pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() + ######################################################################### # uncomment the following part to export the pruned model masks # model_path = os.path.join(args.output_dir, 'pruned_{}_{}.pth'.format(args.model_name_or_path, args.task_name)) # mask_path = os.path.join(args.output_dir, 'mask_{}_{}.pth'.format(args.model_name_or_path, args.task_name)) # pruner.export_model(model_path=model_path, mask_path=mask_path) - # Currently, speeding up the Transformers through NNI is not supported because of shape inference issues. + ######################################################################### + # Speedup + # Currently, speeding up Transformers through NNI ModelSpeedup is not supported because of shape inference issues. # However, if you are using the transformers library, you can use the following workaround: + # The following code gets the head pruning decisions from the Pruner and calls the _prune_heads() function + # implemented in models from the transformers library to speed up the model. 
if args.speed_up: speedup_rules = {} for group_idx, group in enumerate(pruner.attention_name_groups): From b0b01fc03b3674f35040b1507b3f511dfd939c3b Mon Sep 17 00:00:00 2001 From: Di Wu Date: Sun, 11 Jul 2021 14:40:25 +0900 Subject: [PATCH 29/63] debug --- docs/en_US/Compression/Overview.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst index ba04df4d54..c21545c2e6 100644 --- a/docs/en_US/Compression/Overview.rst +++ b/docs/en_US/Compression/Overview.rst @@ -76,6 +76,7 @@ Pruning algorithms compress the original network by removing redundant weights o * - `Transformer Head Pruner <../Compression/Pruner.rst#transformerhead-pruner>`__ - Pruning attention heads of transformer models by a range of ranking metrics. + You can refer to this `benchmark <../CommunitySharings/ModelCompressionComparison.rst>`__ for the performance of these pruners on some benchmark problems. Quantization Algorithms From af518725e5f8914c500b7c85e14c2f4c8e81f8c4 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Sun, 11 Jul 2021 14:50:04 +0900 Subject: [PATCH 30/63] debug --- docs/en_US/Compression/Overview.rst | 2 +- docs/en_US/Compression/Pruner.rst | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst index c21545c2e6..5b3187b3fd 100644 --- a/docs/en_US/Compression/Overview.rst +++ b/docs/en_US/Compression/Overview.rst @@ -73,7 +73,7 @@ Pruning algorithms compress the original network by removing redundant weights o - Automatic pruning by iteratively call SimulatedAnnealing Pruner and ADMM Pruner `Reference Paper `__ * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__ - * - `Transformer Head Pruner <../Compression/Pruner.rst#transformerhead-pruner>`__ + * - `Transformer Head Pruner <./Pruner.rst#transformerhead-pruner>`__ - Pruning attention heads of transformer models by a range of ranking metrics. 
diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index 44156d6970..c16cb59d57 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -838,6 +838,7 @@ Usage 3: one-shot pruning, different sparsity for different layer (PyTorch code)
     pruner = TransformerHeadPruner(model, config_list, **kwargs)
     pruner.compress()
 
+
 User configuration for Transformer Head Pruner
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
From 521b4c84b7cdabaccd4e80a801960ad5095074d3 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Sun, 11 Jul 2021 14:58:22 +0900
Subject: [PATCH 31/63] debug

---
 docs/en_US/Compression/Overview.rst |  2 +-
 docs/en_US/Compression/Pruner.rst   | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst
index 5b3187b3fd..75492332c5 100644
--- a/docs/en_US/Compression/Overview.rst
+++ b/docs/en_US/Compression/Overview.rst
@@ -73,7 +73,7 @@ Pruning algorithms compress the original network by removing redundant weights o
     - Automatic pruning by iteratively call SimulatedAnnealing Pruner and ADMM Pruner `Reference Paper `__
   * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__
     - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__
-  * - `Transformer Head Pruner <./Pruner.rst#transformerhead-pruner>`__
+  * - `Transformer Head Pruner <../Compression/Pruner.rst#transformer-head-pruner>`__
     - Pruning attention heads of transformer models by a range of ranking metrics.

diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index c16cb59d57..0c53461da1 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -28,7 +28,7 @@ We provide several pruning algorithms that support fine-grained weight pruning a
 **Others**
 
 * `Lottery Ticket Hypothesis <#lottery-ticket-hypothesis>`__
-* `Transformer Head Pruner <#transformerhead-pruner>`__
+* `Transformer Head Pruner <#transformer-head-pruner>`__
 
 Level Pruner
 ------------
@@ -728,7 +728,7 @@ Transformer Head Pruner
 -----------------------
 
 Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__.
-Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contains the projected results for all heads. Normall, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper for more details.
+Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contain the projected results for all heads. Normally, the results are then reshaped so that each head performs the attention computation independently. The final results are concatenated back before being fed into the output projection. Please refer to the original paper for more details.
 
 Therefore, when an attention head is pruned, the same weights corresponding to that head in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. 
In our implementation, we calculate and apply masks to the four matrices together. @@ -749,13 +749,13 @@ Currently, the following head sorting criteria are supported: * "l2_activation": rank heads by the L2-norm of their attention computation output. * "taylorfo": rank heads by l1 norm of the output of attention computation * gradient for this output. Check more details in `this paper `__ and `this one `__. -We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the `global_sort` parameter. Note that if `global_sort=True` is passed, all weights must have the same sparsity in the config list. +We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list. -In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a 2-d list containing the names of these modules (usage 1 below) to the pruner, or simply pass a dummy input and the pruner will run `torch.jit.trace` to group the weights (usage 2 below). +In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a 2-d list containing the names of these modules (usage 1 below) to the pruner, or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 2 below). However, if you would like to assign different sparsity to each layer, currently you could only use the first option, i.e., passing names of the weights to the pruner (usage 3 below). -In addition to the following usage guide, we provide a more detailed example of pruning BERT and running it on tasks from the GLUE benchmark. Please find it in this :githublink:`example `. +In addition to the following usage guide, we provide a more detailed example of pruning BERT and running it on tasks from the GLUE benchmark. Please find it in this :githublink:`page `. Usage ^^^^^ From 42b3d5a650a7f58a491374ed0f159bd95e769a37 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Sun, 11 Jul 2021 15:16:36 +0900 Subject: [PATCH 32/63] debug --- docs/en_US/Compression/Overview.rst | 2 +- docs/en_US/Compression/Pruner.rst | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/en_US/Compression/Overview.rst b/docs/en_US/Compression/Overview.rst index 75492332c5..723649af58 100644 --- a/docs/en_US/Compression/Overview.rst +++ b/docs/en_US/Compression/Overview.rst @@ -74,7 +74,7 @@ Pruning algorithms compress the original network by removing redundant weights o * - `AMC Pruner <../Compression/Pruner.rst#amc-pruner>`__ - AMC: AutoML for Model Compression and Acceleration on Mobile Devices `Reference Paper `__ * - `Transformer Head Pruner <../Compression/Pruner.rst#transformer-head-pruner>`__ - - Pruning attention heads of transformer models by a range of ranking metrics. + - Pruning attention heads from transformer models either in one shot or iteratively. You can refer to this `benchmark <../CommunitySharings/ModelCompressionComparison.rst>`__ for the performance of these pruners on some benchmark problems. 
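To illustrate the difference between local and global sorting of head importance scores discussed above, here is a minimal sketch. The function name select_heads_to_prune and the (num_layers, num_heads) score layout are assumptions made for this example, not the pruner's internal API.

.. code-block:: python

    import torch

    def select_heads_to_prune(scores, sparsity, global_sort=False):
        # scores: (num_layers, num_heads) tensor of head importance values,
        # e.g. per-head L1 norms of the Q/K/V weights for "l1_weight".
        num_layers, num_heads = scores.shape
        if global_sort:
            # Pool all heads together: layers with many low-scoring heads
            # may end up losing more heads than others.
            k = int(sparsity * num_layers * num_heads)
            flat = torch.argsort(scores.flatten())[:k]
            return [(int(i) // num_heads, int(i) % num_heads) for i in flat]
        # Local sorting: remove the same fraction of heads from every layer.
        k = int(sparsity * num_heads)
        pruned = []
        for layer, layer_scores in enumerate(scores):
            pruned.extend((layer, int(h)) for h in torch.argsort(layer_scores)[:k])
        return pruned

    # e.g. 12 layers x 12 heads, pruning the lowest-scoring half of each layer
    decisions = select_heads_to_prune(torch.rand(12, 12), sparsity=0.5)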
diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index 0c53461da1..650a0eac89 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -736,26 +736,26 @@ The pruner implements the following algorithm:
 
 .. code-block:: bash
 
-   Repeat for each pruning iteration (only 1 for one-shot pruning):
-       1. Calculate importance scores for each head in each specified layer (with different criterion options)
-       2. Sort heads locally or globally, and prune out some heads with lowest scores. The number of pruned heads is determined according to the sparsity parameter.
+   Repeat for each pruning iteration (1 for one-shot pruning):
+       1. Calculate importance scores for each head in each specified layer using the specified criterion
+       2. Sort heads locally or globally, and prune out the heads with the lowest scores. The number of pruned heads is determined according to the sparsity specified in the config.
        3. If the specified pruning iteration is larger than 1 (iterative pruning), finetune the model for a while before the next pruning iteration.
 
 Currently, the following head sorting criteria are supported:
 
-   * "l1_weight": rank heads by the L1-norm of their query, key, and value projection matrices.
-   * "l2_weight": rank heads by the L2-norm of their query, key, and value projection matrices.
+   * "l1_weight": rank heads by the L1-norm of weights of the query, key, and value projection matrices.
+   * "l2_weight": rank heads by the L2-norm of weights of the query, key, and value projection matrices.
    * "l1_activation": rank heads by the L1-norm of their attention computation output.
    * "l2_activation": rank heads by the L2-norm of their attention computation output.
    * "taylorfo": rank heads by l1 norm of the output of attention computation * gradient for this output. Check more details in `this paper `__ and `this one `__.
 
 We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list.
 
-In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a 2-d list containing the names of these modules (usage 1 below) to the pruner, or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 2 below).
+In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules (usage 2 below) to the pruner, or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 1 below).
 
-However, if you would like to assign different sparsity to each layer, currently you could only use the first option, i.e., passing names of the weights to the pruner (usage 3 below).
+However, if you would like to assign different sparsity to each layer, currently you can only use the name-based option, i.e., passing names of the weights to the pruner (usage 3 below). Also note that weights belonging to the same layer must have the same sparsity.
 
-In addition to the following usage guide, we provide a more detailed example of pruning BERT and running it on tasks from the GLUE benchmark. Please find it in this :githublink:`page `.
+In addition to the following usage guide, we provide a more detailed example of pruning BERT for tasks from the GLUE benchmark. 
Please find it in this :githublink:`page `. Usage ^^^^^ @@ -768,7 +768,7 @@ Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code) kwargs = {'ranking_criterion': "l1_weight", 'global_sort': False, 'num_iterations': 1, - 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 + 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 'head_hidden_dim': 64, 'dummy_input': dummy_input, 'trainer': trainer, @@ -781,7 +781,7 @@ Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code) pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() -Usage 2: one-shot pruning, passing names to the pruner instead of dummy input (PyTorch code) +Usage 2: same effect as usage 1, the only change is passing names to the pruner instead of dummy input (PyTorch code) .. code-block:: python @@ -806,7 +806,7 @@ Usage 2: one-shot pruning, passing names to the pruner instead of dummy input (P pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() -Usage 3: one-shot pruning, different sparsity for different layer (PyTorch code) +Usage 3: one-shot pruning, setting different sparsity for different layers (PyTorch code) .. code-block:: python From 59b8adb090f59f305692ad9a0eb42135ec08ef57 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Fri, 16 Jul 2021 07:00:58 +0000 Subject: [PATCH 33/63] fix ungrouped module removing logic --- .../pytorch/pruning/transformer_pruner.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index c25c8c3189..1bdd2e14b4 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -112,7 +112,9 @@ def __init__(self, model, config_list,head_hidden_dim, attention_name_groups=Non self.validate_weight_groups() # Remove any mistakenly captured ungrouped modules + self._unwrap_model() self.remove_ungrouped_modules() + self._wrap_model() self.masker = MASKER_DICT[ranking_criterion](model, self, self.head_hidden_dim, **algo_kwargs) self.pruned_heads = {i: set() for i in range(len(self.masking_groups))} @@ -190,9 +192,18 @@ def validate_weight_groups(self): def remove_ungrouped_modules(self): """ Remove non-attention weights that might be mistakenly captured by a simplified config_list. 
+        Also update the corresponding list of layer information (self.modules_to_compress)
         """
         care_of_modules = set([x for layer in self.masking_groups for x in layer])
-        self.modules_wrapper = [x for x in self.modules_wrapper if x in care_of_modules]
+
+        modules_wrapper_new, modules_to_compress_new = [], []
+        for wrapper, layer_info in zip(self.modules_wrapper, self.modules_to_compress):
+            if wrapper in care_of_modules:
+                modules_wrapper_new.append(wrapper)
+                modules_to_compress_new.append(layer_info)
+
+        self.modules_wrapper = modules_wrapper_new
+        self.modules_to_compress = modules_to_compress_new
 
     def validate_config(self, model, config_list):
         """
From af2144a83d0e86e5987e60460bc9775e7156dedd Mon Sep 17 00:00:00 2001
From: Di Wu 
Date: Fri, 16 Jul 2021 07:07:23 +0000
Subject: [PATCH 34/63] Update shape dependency to align with master

---
 .../pytorch/utils/shape_dependency.py         | 284 ++++++++++--------
 1 file changed, 167 insertions(+), 117 deletions(-)

diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py
index 8dda1506c9..57770df230 100644
--- a/nni/compression/pytorch/utils/shape_dependency.py
+++ b/nni/compression/pytorch/utils/shape_dependency.py
@@ -3,19 +3,34 @@
 
 import csv
 import logging
+import numpy as np
 
-__all__ = ['ChannelDependency', 'GroupDependency',
-           'CatPaddingDependency', 'InputChannelDependency',
-           'AttentionWeightDependency']
+
+__all__ = ['ChannelDependency', 'GroupDependency', 'InputChannelDependency', 'AttentionWeightDependency']
 
 CONV_TYPE = 'aten::_convolution'
 ADD_TYPES = ['aten::add', 'aten::add_']
+MUL_TYPES = ['aten::mul', 'aten::mul_']
 CAT_TYPE = 'aten::cat'
 logger = logging.getLogger('Shape_Dependency')
 RESHAPE_OPS = [CAT_TYPE, 'aten::view',
                'aten::reshape', 'aten::flatten', 'aten::mean']
 
+
+def lcm_list(L):
+    lcm = 1
+    for i in L:
+        lcm = np.lcm(lcm, i)
+    return lcm
+
+
+def gcd_list(L):
+    gcd = L[0]
+    for i in L:
+        gcd = np.gcd(gcd, i)
+    return gcd
+
+
 class Dependency:
     def __init__(self, model=None, dummy_input=None, traced_model=None):
         """
@@ -39,12 +54,39 @@ def export(self, filepath):
         raise NotImplementedError
 
 
+def reshape_break_channel_dependency(op_node):
+    """
+    The reshape operations such as (reshape, view, flatten) may break
+    the channel dependency. We need to check the input parameters of
+    these reshape operations to check if this reshape node will break
+    the channel dependency. However, it's complicated to analyze the input
+    parameters for each reshape function and infer if it will break the channel
+    dependency. So currently, we just check if the input channel and the output
+    channel are the same; if so, then we can say the original reshape function
+    doesn't want to change the number of the channels, which means the channel
+    dependency is not broken. In contrast, the original reshape operation wants
+    to change the number of channels, so it breaks the channel dependency.
+    Parameters
+    ----------
+    opnode: NodePyOP
+        A Op node of the graph.
+    Returns
+    -------
+    bool
+        If this operation will break the channel dependency.
+    """
+    in_shape = op_node.auxiliary['in_shape']
+    out_shape = op_node.auxiliary['out_shape']
+    in_channel = in_shape[1]
+    out_channel = out_shape[1]
+    return in_channel != out_channel
+
+
 class ChannelDependency(Dependency):
    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        This model analyze the channel dependencies between the conv
        layers in a model.
- Parameters ---------- model : torch.nn.Module @@ -61,12 +103,10 @@ def __init__(self, model=None, dummy_input=None, traced_model=None): def _get_parent_layers(self, node): """ Find the nearest father conv layers for the target node. - Parameters --------- node : torch._C.Node target node. - Returns ------- parent_layers: list @@ -81,6 +121,9 @@ def _get_parent_layers(self, node): # find the first met conv parent_layers.append(curnode.name) continue + elif curnode.op_type in RESHAPE_OPS: + if reshape_break_channel_dependency(curnode): + continue parents = self.graph.find_predecessors(curnode.unique_name) parents = [self.graph.name_to_node[name] for name in parents] for parent in parents: @@ -135,7 +178,6 @@ def export(self, filepath): means the output channel(filters) numbers of these three layers should be same with each other, otherwise the model may has shape conflict. - Output example: Dependency Set,Convolutional Layers Set 1,layer1.1.conv2,layer1.0.conv2,conv1 @@ -166,18 +208,16 @@ def export(self, filepath): def dependency_sets(self): """ Get the list of the dependency set. - Returns ------- dependency_sets : list list of the dependency sets. For example, [set(['conv1', 'conv2']), set(['conv3', 'conv4'])] - """ d_sets = [] visited = set() for node in self.graph.nodes_py.nodes_op: - if node.op_type != 'Conv2d' or node in visited: + if (node.op_type != 'Conv2d' and node.op_type != 'Linear') or node in visited: continue tmp_set = set() if node.name not in self.dependency: @@ -191,35 +231,6 @@ def dependency_sets(self): return d_sets -def reshape_break_channel_dependency(op_node): - """ - The reshape operations such as (reshape, view, flatten) may break - the channel dependency. We need to check the input parameters of - these reshape operations to check if this reshape node will break - the channel dependency. However, it's complicated to analyze the the input - parameters for each reshape function and infer if it will break the channel - dependency. So currently, we just check if the input channel and the output - channel is the same, if so, then we can say the original reshape function - doesn't want to change the number of the channels, which means the channel - dependency is not broken. In contrast, the original reshap operation wants - to change the number of channels, so it breaks the channel dependency. - - Parameters - ---------- - opnode: NodePyOP - A Op node of the graph. - Returns - ------- - bool - If this operation will break the channel dependency. - """ - in_shape = op_node.auxiliary['in_shape'] - out_shape = op_node.auxiliary['out_shape'] - in_channel = in_shape[1] - out_channel = out_shape[1] - return in_channel != out_channel - - class InputChannelDependency(ChannelDependency): """ Some pruners may prune the input channel of the convolutional @@ -238,7 +249,6 @@ def __init__(self, model, dummy_input=None, traced_model=None): """ This model analyze the input channel dependencies between the conv layers in a model. - Parameters ---------- model : torch.nn.Module @@ -296,73 +306,11 @@ def build_dependency(self): self.dependency[layer] = dependency_set -class CatPaddingDependency(ChannelDependency): - def __init__(self, model=None, dummy_input=None, traced_model=None): - super(CatPaddingDependency, self).__init__( - model, dummy_input, traced_model) - - def build_dependency(self): - """ - Build the cat padding dependencies. - If the output features of several layers are stitched together - by cat operation, then these layers have cat padding dependencies. 
- This is because when inferring the cat mask, we need all the input - masks for the cat operation. At this time we need to know the source - of all input vectors of a cat operation. - """ - for node in self.graph.nodes_py.nodes_op: - parent_layers = [] - if node.op_type == CAT_TYPE: - parent_layers = self._get_parent_layers(node) - dependency_set = set(parent_layers) - # merge the dependencies - for parent in parent_layers: - if parent in self.dependency: - dependency_set.update(self.dependency[parent]) - # save the dependencies - for _node in dependency_set: - self.dependency[_node] = dependency_set - - @property - def dependency_sets(self): - d_sets = [] - visited = set() - for nodename in self.dependency: - if nodename in visited: - continue - d_sets.append(self.dependency[nodename]) - return d_sets - - def export(self, filepath): - """ - Export the dependencies into a file. - In the output file, each line contains a set of layers - whose output features are stitched together by the cat - operation. - - output example: - Dependency Set, Layers - set1, Conv1, Conv2 - set2, Conv3, Conv4 - """ - header = ['Dependency Set', 'Layers'] - setid = 0 - with open(filepath, 'w') as csvf: - csv_w = csv.writer(csvf, delimiter=',') - csv_w.writerow(header) - for layers in self.dependency_sets: - setid += 1 - row = ['Set %d' % setid] - row.extend(list(layers)) - csv_w.writerow(row) - - class GroupDependency(Dependency): def __init__(self, model=None, dummy_input=None, traced_model=None): """ This model analyze the group dependencis between the conv layers in a model. - Parameters ---------- model : torch.nn.Module @@ -373,17 +321,16 @@ def __init__(self, model=None, dummy_input=None, traced_model=None): if we alreay has the traced graph of the target model, we donnot need to trace the model again. """ + self.min_groups = {} super(GroupDependency, self).__init__(model, dummy_input, traced_model) def _get_parent_convs(self, node): """ Find the nearest father conv layers for the target node. - Parameters --------- node : torch._C.Node target node. - Returns ------- parent_layers : list @@ -411,12 +358,10 @@ def _get_parent_convs(self, node): def _get_conv_groups(self, node_group): """ Get the number of groups for a convolutional layer. - Parameters ---------- node_group : NodePyGroup target node. - Returns ------- group : int @@ -445,34 +390,39 @@ def build_dependency(self): divided into 4 groups after filter pruning, because the input channels of conv2 shoule be divided into 4 groups. - Returns ------- self.dependency : dict key: the name of conv layers, value: the minimum value that the number of filters should be divisible to. """ + self.groups = {} for node in self.graph.nodes_py.nodes_op: if node.op_type == 'Conv2d' or node.op_type == 'ConvTranspose2d': group = self._get_conv_groups(node) - - if node.name in self.dependency: + if node.name in self.groups: # the conv layer whose group is larger than 1 will require that # it's number of output channel to be divisible by the number of group. - self.dependency[node.name] = max( - self.dependency[node.name], group) + self.groups[node.name].append(group) else: - self.dependency[node.name] = group + self.groups[node.name] = [group] if group > 1: # for the conv layer whose group is larger than 1, it will require the number # of output channels of their parent conv layer to be divisible by group. 
                parent_convs = self._get_parent_convs(node)
                for parent in parent_convs:
-                    if parent in self.dependency:
-                        self.dependency[parent] = max(
-                            self.dependency[parent], group)
+                    if parent in self.groups:
+                        self.groups[parent].append(group)
                     else:
-                        self.dependency[parent] = group
+                        self.groups[parent] = [group]
+
+        for name in self.groups:
+            self.dependency[name] = lcm_list(self.groups[name])
+            if min(self.groups[name]) == gcd_list(self.groups[name]):
+                self.min_groups[name] = min(self.groups[name])
+            else:
+                self.min_groups[name] = 1
+
         return self.dependency
 
     def export(self, filepath):
@@ -484,7 +434,6 @@ def export(self, filepath):
         line is the group count of the filters in this
         layer. Note that, the group count may be larger
         than this layers original group number.
-
         output example:
         Conv layer, Groups
         Conv1, 1
@@ -504,6 +453,107 @@ def dependency_sets(self):
         return self.dependency
 
 
+
+class ReshapeDependency(Dependency):
+    def __init__(self, model=None, dummy_input=None, traced_model=None):
+        """
+        Some models may have view/reshape functions whose parameters are fixed
+        and cannot be replaced at all. Therefore, these functions may have some constraints on
+        their input shapes. In this class, we find the direct input conv/linear layers of these
+        reshape functions. If you get a shape conflict when running forward inference on the
+        sped-up model, please try removing these layers from the pruner config list and try again.
+        Parameters
+        ----------
+        model : torch.nn.Module
+            The model to be analyzed.
+        dummy_input : torch.Tensor
+            The example input data to trace the network architecture.
+        traced_model : torch._C.Graph
+            if we already have the traced graph of the target model, we do not
+            need to trace the model again.
+        """
+        super(ReshapeDependency, self).__init__(
+            model, dummy_input, traced_model)
+
+    def _get_parent_layers(self, node):
+        """
+        Find the nearest father conv layers for the target node.
+        Parameters
+        ---------
+        node : torch._C.Node
+            target node.
+        Returns
+        -------
+        parent_layers: list
+            nearest father conv/linear layers for the target node.
+        """
+        parent_layers = []
+        queue = []
+        queue.append(node)
+        while queue:
+            curnode = queue.pop(0)
+            if curnode.op_type == 'Conv2d' or curnode.op_type == 'Linear' or curnode.op_type == 'ConvTranspose2d':
+                # find the first met conv
+                parent_layers.append(curnode.name)
+                continue
+            parents = self.graph.find_predecessors(curnode.unique_name)
+            parents = [self.graph.name_to_node[name] for name in parents]
+            for parent in parents:
+                queue.append(parent)
+        return parent_layers
+
+    def build_dependency(self):
+        """
+        Build the reshape dependencies for the conv layers
+        in the model.
+        """
+        # unpack the tuple/list manually before analyzing the
+        # channel dependency
+        self.graph.unpack_manually()
+        for node in self.graph.nodes_py.nodes_op:
+            parent_layers = []
+            # find the nodes that contain aten::view
+            # or aten::reshape operations
+            if node.op_type in ['aten::view', 'aten::reshape']:
+                logger.info('Detect reshape-like functions: %s', node.op_type)
+                parent_layers = self._get_parent_layers(node)
+                print('Parent layers', parent_layers)
+            self.dependency[node.unique_name] = parent_layers
+
+    def export(self, filepath):
+        """
+        export the reshape dependencies as a csv file.
+        Output example:
+        Reshape OP, Dependent Layers
+        model.view.1,layer1.1.conv2,layer1.0.conv2,conv1
+        model.mean.1,layer1.0.conv1
+        model.reshape.1,layer1.1.conv1
+        """
+        header = ['Reshape OP', 'Dependent Layers']
+        with open(filepath, 'w') as csvf:
+            csv_w = csv.writer(csvf, delimiter=',')
+            csv_w.writerow(header)
+            for reshape_op in self.dependency:
+                # note: list.extend returns None, so build the row by concatenation
+                row = [reshape_op] + list(self.dependency[reshape_op])
+                csv_w.writerow(row)
+
+    @property
+    def dependency_sets(self):
+        """
+        Get the list of the dependency set.
+        Returns
+        -------
+        dependency_sets : list
+            list of the dependency sets. For example,
+            [set(['conv1', 'conv2']), set(['conv3', 'conv4'])]
+        """
+        d_sets = []
+        for reshape_node in self.dependency:
+            d_sets.extend(self.dependency[reshape_node])
+        d_sets = list(set(d_sets))
+        return d_sets
+
+
 class AttentionWeightDependency(Dependency):
     def __init__(self, model=None, dummy_input=None, traced_model=None):
         """
From 3e446ed2ed8583181541273da9d244182d7bf41a Mon Sep 17 00:00:00 2001
From: Di Wu 
Date: Fri, 16 Jul 2021 07:23:31 +0000
Subject: [PATCH 35/63] doc string debug

---
 nni/compression/pytorch/utils/shape_dependency.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py
index b8df14d74f..7184587890 100644
--- a/nni/compression/pytorch/utils/shape_dependency.py
+++ b/nni/compression/pytorch/utils/shape_dependency.py
@@ -210,6 +210,7 @@ def export(self, filepath):
     def dependency_sets(self):
         """
         Get the list of the dependency set.
+
         Returns
         -------
         dependency_sets : list
@@ -390,8 +391,9 @@ def build_dependency(self):
         conv2 takes the output features of conv1 as
         input. Then we have to
         the filters of conv1 can still be divided into
         4 groups after filter pruning, because
-        the input channels of conv2 shoule be divided into
+        the input channels of conv2 should be divided into
         4 groups.
+
         Returns
         -------
         self.dependency : dict
From 8fa6263f2b00afa8e6c8773123febe21b1b2982e Mon Sep 17 00:00:00 2001
From: Di Wu 
Date: Fri, 16 Jul 2021 08:23:23 +0000
Subject: [PATCH 36/63] resolve comments

---
 .../pytorch/pruning/transformer_pruner.py     | 13 ++++----
 .../transformer_pruning_head_masker.py        | 30 +++++++++++--------
 2 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
index 1bdd2e14b4..c6b1a9b35a 100644
--- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py
@@ -64,7 +64,7 @@ class TransformerHeadPruner(Pruner):
         Number of finetuning epochs before the next pruning iteration. This only has effect when num_iterations > 1.
         If num_iterations is 1, then no finetuning is performed by the pruner after pruning.
     optimizer: torch.optim.Optimizer
-        Optimizer used to train model
+        Optimizer used to train model
     trainer: function
         Function used to train the model.
Users should write this function as a normal function to train the Pytorch model @@ -166,7 +166,8 @@ def validate_weight_groups(self): - output projection weight shape must match total hidden dimension (inferred from Q, K, V projection) - Four weights in a group must have the same sparsity in their config - If global_sort is specified, all weights must have the same sparsity - - head_hidden_dim must be a divisor of the output dimension of the projection weights + - head_hidden_dim must be a divisor of the output dimension of the projection weights (i.e., the resulting + head number must be an integer) """ errmsg = 'Attention weight group sanity check not passed' sparsity = None @@ -233,13 +234,11 @@ def compress(self): else: self.update_mask() - # for iterative pruning, finetune before next iteration - if self.num_iterations > 1: + # for iterative pruning, if not the last iteration, finetune before next iteration + # Then, reset the maskers (may create additional hooks) + if self.num_iterations > 1 and pruning_iter != self.num_iterations - 1: for e in range(self.epochs_per_iteration): self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=e+1) - - # if not the last iteration, reset the maskers (may create additional hooks) - if self.num_iterations > 1 and pruning_iter != self.num_iterations - 1: self.masker.reset() logger.info('Pruned heads after iteration %i', pruning_iter) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 9e2c992bd1..951b0631e8 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -225,9 +225,9 @@ def get_head_importance_scores(self, weight_group): key_proj_weights = k_proj.module.weight.data.view([n_heads, -1]) value_proj_weights = v_proj.module.weight.data.view([n_heads, -1]) - query_norm_avg = torch.sum(torch.abs(query_proj_weights), -1) - key_norm_avg = torch.sum(torch.abs(key_proj_weights), -1) - value_norm_avg = torch.sum(torch.abs(value_proj_weights), -1) + query_norm_avg = torch.linalg.norm(query_proj_weights, 1, -1) + key_norm_avg = torch.linalg.norm(key_proj_weights, 1, -1) + value_norm_avg = torch.linalg.norm(value_proj_weights, 1, -1) return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() @@ -249,9 +249,9 @@ def get_head_importance_scores(self, weight_group): key_proj_weights = k_proj.module.weight.data.view([n_heads, -1]) value_proj_weights = v_proj.module.weight.data.view([n_heads, -1]) - query_norm_avg = torch.sum(query_proj_weights ** 2, -1) - key_norm_avg = torch.sum(key_proj_weights ** 2, -1) - value_norm_avg = torch.sum(value_proj_weights ** 2, -1) + query_norm_avg = torch.linalg.norm(query_proj_weights, 2, -1) + key_norm_avg = torch.linalg.norm(key_proj_weights, 2, -1) + value_norm_avg = torch.linalg.norm(value_proj_weights, 2, -1) return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() @@ -328,9 +328,9 @@ def reset(self): def get_head_importance_scores(self, weight_group): _, _, _, output_proj = weight_group activations = torch.stack(self.pruner.collected_activation[output_proj.group_idx], -1) - activations = torch.sum(activations, -1) - n_heads = activations.size()[0] // self.head_hidden_dim - scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu() + scores = torch.sum(activations, -1).detach().cpu() + # n_heads = 
activations.size()[0] // self.head_hidden_dim
+        # scores = torch.sum(activations.view([n_heads, -1]), -1).detach().cpu()
 
         # clean up hooks
         if self.pruner.hook_id in self.pruner._fwd_hook_handles:
@@ -339,12 +339,15 @@ def get_head_importance_scores(self, weight_group):
         return scores
 
     def _add_activation_collector(self, pruner):
-        def collector(collected_activation):
+        def collector(collected_activation, head_hidden_dim):
             def hook(module_, input_, output):
                 if type(input_) is tuple:
                     input_ = input_[0]
-                raw_activation = torch.abs(input_.detach().cpu() ** 2)    # L2-norm
-                raw_activation_reduced = torch.sum(raw_activation, [0, 1])
+                raw_activation = input_.detach().cpu() ** 2
+                n_heads = raw_activation.size(-1) // head_hidden_dim
+                raw_activation = raw_activation.view(raw_activation.size(0), raw_activation.size(1), n_heads, -1)
+                raw_activation = torch.linalg.norm(raw_activation, 2, -1)     # (B, S, n_heads)
+                raw_activation_reduced = torch.sum(raw_activation, [0, 1])    # (n_heads,)
                 collected_activation.append(raw_activation_reduced)
             return hook
 
@@ -355,7 +358,8 @@ def hook(module_, input_, output):
 
         for _, _, _, output_proj in pruner.masking_groups:
             pruner.collected_activation[output_proj.group_idx] = []
-            handle = output_proj.register_forward_hook(collector(pruner.collected_activation[output_proj.group_idx]))
+            handle = output_proj.register_forward_hook(collector(pruner.collected_activation[output_proj.group_idx],
+                                                                 head_hidden_dim=self.head_hidden_dim))
             pruner._fwd_hook_handles[pruner._fwd_hook_id].append(handle)
 
 
From f45865ffca85e08f717279db19d2142f87aa3873 Mon Sep 17 00:00:00 2001
From: Di Wu 
Date: Fri, 16 Jul 2021 08:26:22 +0000
Subject: [PATCH 37/63] update docs

---
 docs/en_US/Compression/Pruner.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index 650a0eac89..2ca4a1b516 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -749,7 +749,7 @@ Currently, the following head sorting criteria are supported:
 * "l2_activation": rank heads by the L2-norm of their attention computation output.
 * "taylorfo": rank heads by l1 norm of the output of attention computation * gradient for this output. Check more details in `this paper `__ and `this one `__.
 
-We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list.
+We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list. However, this does not mean that each layer will be pruned to the same sparsity as specified. This sparsity value will be interpreted as a global sparsity, and each layer is likely to have a different sparsity after pruning by global sort.
 
 In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules (usage 1 below) to the pruner, or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 2 below).
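To make the global-sort semantics described in the patch above concrete, here is a toy sketch of how a single global sparsity value can translate into different numbers of pruned heads per layer. The layer scores below are fabricated, and the threshold logic only imitates the behaviour described in the documentation, not the pruner's internal code.

.. code-block:: python

   import torch

   # fabricated importance scores for 3 layers with 4 heads each
   layer_scores = [torch.tensor([0.9, 0.8, 0.7, 0.6]),
                   torch.tensor([0.1, 0.2, 0.3, 0.4]),
                   torch.tensor([0.5, 0.95, 0.05, 0.85])]

   sparsity = 0.5  # interpreted globally: half of ALL heads are removed
   all_scores = torch.cat(layer_scores)
   n_to_prune = int(sparsity * all_scores.numel())
   threshold = torch.kthvalue(all_scores, n_to_prune).values

   for layer_idx, scores in enumerate(layer_scores):
       pruned = (scores <= threshold).nonzero().flatten().tolist()
       print(f"layer {layer_idx}: prune heads {pruned}")
   # layers end up with different sparsity: 0, 4, and 2 heads pruned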
From ec352677b19a075c2301609b67b8749d678b458c Mon Sep 17 00:00:00 2001 From: Di Wu Date: Sun, 18 Jul 2021 23:07:01 +0900 Subject: [PATCH 38/63] redo example --- .../pruning/transformers/run.sh | 21 +- .../transformers/transformer_pruning.py | 573 ++++++------------ 2 files changed, 204 insertions(+), 390 deletions(-) diff --git a/examples/model_compress/pruning/transformers/run.sh b/examples/model_compress/pruning/transformers/run.sh index ed8cf599a8..8599edc376 100755 --- a/examples/model_compress/pruning/transformers/run.sh +++ b/examples/model_compress/pruning/transformers/run.sh @@ -1,16 +1,17 @@ #!/bin/bash -# Usage: ./run.sh gpu_id task +# Usage: ./run.sh gpu_id glue_task export CUDA_VISIBLE_DEVICES=$1 -TASK_NAME=$2 -PRETRAINED_MODEL='bert-base-uncased' # example: 'distilbert-base-uncased' 'roberta-base' 'bert-base-cased' 'bert-base-uncased' +TASK_NAME=$2 # "cola", "sst2", "mrpc", "stsb", "qqp", "mnli", "qnli", "rte", "wnli" +PRETRAINED_MODEL="bert-base-uncased" # "distilbert-base-uncased", "roberta-base", "bert-base-cased", ... # parameters for pruning -USAGE=2 # change to different numbers to run examples with different configs +# change USAGE to different numbers (1, 2, 3) to run examples with different configs +USAGE=2 SPARSITY=0.5 -RANKING_CRITERION=l1_weight -NUM_ITERATIONS=1 # 1 for one-shot pruning +RANKING_CRITERION=l1_weight # "l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo" +NUM_ITERATIONS=1 # 1 for one-shot pruning EPOCHS_PER_ITERATION=1 # other training parameters, no need to change @@ -18,12 +19,11 @@ MAX_LENGTH=128 BATCH_SIZE=32 LR=2e-5 N_EPOCHS=3 -SEED=2021 time=$(date "+%Y%m%d%H%M%S") OUTDIR="models_${PRETRAINED_MODEL}_${TASK_NAME}_$time/" -TASK_LIST=('cola' 'sst2' 'mrpc' 'stsb' 'qqp' 'mnli' 'qnli' 'rte' 'wnli') +TASK_LIST=("cola" "sst2" "mrpc" "stsb" "qqp" "mnli" "qnli" "rte" "wnli") if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then mkdir $OUTDIR python transformer_pruning.py \ @@ -32,11 +32,10 @@ if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then --num_iterations $NUM_ITERATIONS \ --epochs_per_iteration $EPOCHS_PER_ITERATION \ --speed_up \ - --seed $SEED \ - --model_name_or_path $PRETRAINED_MODEL \ + --model_name $PRETRAINED_MODEL \ --task_name $TASK_NAME \ --max_length $MAX_LENGTH \ - --per_device_train_batch_size $BATCH_SIZE \ + --batch_size $BATCH_SIZE \ --learning_rate $LR \ --num_train_epochs $N_EPOCHS \ --output_dir $OUTDIR \ diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 9475d3b9c4..68f9bbecc1 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -1,17 +1,21 @@ -# code adapted from https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification - import argparse import logging import math import os import random -import datasets -from datasets import load_dataset, load_metric import torch from torch.utils.data.dataloader import DataLoader from tqdm.auto import tqdm +import nni +from nni.compression.pytorch import ModelSpeedup +from nni.compression.pytorch.utils.counter import count_flops_params +from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner + + +import datasets +from datasets import load_dataset, load_metric import transformers from transformers import ( AdamW, @@ -22,256 +26,141 @@ AutoTokenizer, 
DataCollatorWithPadding, PretrainedConfig, - SchedulerType, default_data_collator, get_scheduler, - set_seed, ) -import nni -from nni.compression.pytorch import ModelSpeedup -from nni.compression.pytorch.utils.counter import count_flops_params -from nni.algorithms.compression.pytorch.pruning import ( - TransformerHeadPruner -) -logger = logging.getLogger('bert_pruning_example') - -task_to_keys = { - "cola": ("sentence", None), - "mnli": ("premise", "hypothesis"), - "mrpc": ("sentence1", "sentence2"), - "qnli": ("question", "sentence"), - "qqp": ("question1", "question2"), - "rte": ("sentence1", "sentence2"), - "sst2": ("sentence", None), - "stsb": ("sentence1", "sentence2"), - "wnli": ("sentence1", "sentence2"), -} +logger = logging.getLogger("bert_pruning_example") def parse_args(): - parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") + parser = argparse.ArgumentParser(description="Example: prune a Huggingface transformer and finetune on GLUE tasks.") - parser.add_argument("--model_name_or_path", type=str, required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.") + parser.add_argument("--model_name", type=str, required=True, + help="Pretrained model architecture.") parser.add_argument("--task_name", type=str, default=None, - help="The name of the glue task to train on.", - choices=list(task_to_keys.keys())) + help="The name of the GLUE task.", + choices=["cola", "mnli", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]) parser.add_argument("--output_dir", type=str, default=None, - help="Where to store the final model.") - parser.add_argument('--usage', type=int, default=1, - help='Select which config example to run') - parser.add_argument('--sparsity', type=float, required=True, - help='Sparsity - proportion of heads to prune (should be between 0 and 1)') - parser.add_argument('--global_sort', action='store_true', default=False, - help='Rank the heads globally and prune the heads with lowest scores. If set to False, the ' - 'heads are only ranked within one layer') - parser.add_argument("--ranking_criterion", type=str, default='l1_weight', + help="Where to store the model and mask.") + parser.add_argument("--usage", type=int, default=1, + help="Select which pruning config example to run") + parser.add_argument("--sparsity", type=float, required=True, + help="Sparsity: proportion of heads to prune (should be between 0 and 1)") + parser.add_argument("--global_sort", action="store_true", default=False, + help="Rank the heads globally and prune the heads with lowest scores. If set to False, the " + "heads are only ranked within one layer") + parser.add_argument("--ranking_criterion", type=str, default="l1_weight", choices=["l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo"], - help="Where to store the final model.") + help="Criterion by which the attention heads are ranked.") parser.add_argument("--num_iterations", type=int, default=1, help="Number of pruning iterations (1 for one-shot pruning).") parser.add_argument("--epochs_per_iteration", type=int, default=1, help="Epochs to finetune before the next pruning iteration " "(only effective if num_iterations > 1).") - parser.add_argument('--speed_up', action='store_true', default=False, - help='Whether to speed-up the pruned model') - - # parameters for model training; for running examples. 
no need to change them - parser.add_argument("--train_file", type=str, default=None, - help="A csv or a json file containing the training data.") - parser.add_argument("--validation_file", type=str, default=None, - help="A csv or a json file containing the validation data.") + parser.add_argument("--speed_up", action="store_true", default=False, + help="Whether to speed-up the pruned model") + + # parameters for model training; no need to change them for running examples parser.add_argument("--max_length", type=int, default=128, help=("The maximum total input sequence length after tokenization. Sequences longer than this " "will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.")) - parser.add_argument("--pad_to_max_length", action="store_true", - help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.") - parser.add_argument("--use_slow_tokenizer", action="store_true", - help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",) - parser.add_argument("--per_device_train_batch_size", type=int, default=8, - help="Batch size (per device) for the training dataloader.") - parser.add_argument("--per_device_eval_batch_size", type=int, default=8, - help="Batch size (per device) for the evaluation dataloader.") + parser.add_argument("--batch_size", type=int, default=8, + help="Batch size.") parser.add_argument("--learning_rate", type=float, default=5e-5, - help="Initial learning rate (after the potential warmup period) to use.") - parser.add_argument("--weight_decay", type=float, default=0.0, - help="Weight decay to use.") + help="Initial learning rate.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") - parser.add_argument("--max_train_steps", type=int, default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.") - parser.add_argument("--gradient_accumulation_steps", type=int, default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.") - parser.add_argument("--lr_scheduler_type", type=SchedulerType, default="linear", - help="The scheduler type to use.", + parser.add_argument("--lr_scheduler_type", default="linear", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]) parser.add_argument("--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler.") - parser.add_argument("--seed", type=int, default=None, - help="A seed for reproducible training.") args = parser.parse_args() - # Sanity checks - if args.task_name is None and args.train_file is None and args.validation_file is None: - raise ValueError("Need either a task name or a training/validation file.") - else: - if args.train_file is not None: - extension = args.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if args.validation_file is not None: - extension = args.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args -def get_raw_dataset(args): - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
+def get_raw_dataset(task_name): + """ + Get a GLUE dataset using huggingface datasets. + """ + raw_dataset = load_dataset("glue", task_name) + is_regression = task_name == "stsb" + num_labels = 1 if is_regression else len(raw_dataset["train"].features["label"].names) - # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the - # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named - # label if at least two columns are provided. + return raw_dataset, is_regression, num_labels - # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this - # single column. You can easily tweak this behavior (see below) - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if args.task_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset("glue", args.task_name) - else: - # Loading the dataset from local csv or json file. - data_files = {} - if args.train_file is not None: - data_files["train"] = args.train_file - if args.validation_file is not None: - data_files["validation"] = args.validation_file - extension = (args.train_file if args.train_file is not None else args.valid_file).split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files) - # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Labels - if args.task_name is not None: - is_regression = args.task_name == "stsb" - if not is_regression: - label_list = raw_datasets["train"].features["label"].names - num_labels = len(label_list) - else: - label_list = None - num_labels = 1 - else: - # Trying to have good defaults here, don't hesitate to tweak to your needs. - is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] - if is_regression: - label_list = None - num_labels = 1 - else: - # A useful fast method: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique - label_list = raw_datasets["train"].unique("label") - label_list.sort() # Let's sort it for determinism - num_labels = len(label_list) - - return raw_datasets, is_regression, label_list, num_labels - - -def preprocess_dataset(args, tokenizer, model, raw_datasets, num_labels, is_regression, label_list): - # Preprocessing the datasets - if args.task_name is not None: - sentence1_key, sentence2_key = task_to_keys[args.task_name] - else: - # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. - non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] - if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: - sentence1_key, sentence2_key = "sentence1", "sentence2" - else: - if len(non_label_column_names) >= 2: - sentence1_key, sentence2_key = non_label_column_names[:2] - else: - sentence1_key, sentence2_key = non_label_column_names[0], None - - # Some models have set the order of the labels to use, so let's make sure we do use it. - label_to_id = None - if ( - model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id - and args.task_name is not None - and not is_regression - ): - # Some have all caps in their config, some don't. 
- label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} - if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): - logger.info( - f"The configuration of the model provided the following label correspondence: {label_name_to_id}. " - "Using it!" - ) - label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} - else: - logger.warning( - "Your model seems to have been trained with labels, but they don't match the dataset: ", - f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." - "\nIgnoring the model labels as a result.", - ) - elif args.task_name is None: - label_to_id = {v: i for i, v in enumerate(label_list)} - - padding = "max_length" if args.pad_to_max_length else False - - def preprocess_function(examples): - # Tokenize the texts +def preprocess(args, tokenizer, raw_dataset): + """ + Tokenization and column renaming. + """ + assert args.task_name is not None + + task_to_keys = { + "cola": ("sentence", None), + "mnli": ("premise", "hypothesis"), + "mrpc": ("sentence1", "sentence2"), + "qnli": ("question", "sentence"), + "qqp": ("question1", "question2"), + "rte": ("sentence1", "sentence2"), + "sst2": ("sentence", None), + "stsb": ("sentence1", "sentence2"), + "wnli": ("sentence1", "sentence2"), + } + sentence1_key, sentence2_key = task_to_keys[args.task_name] + + def tokenize(data): texts = ( - (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) + (data[sentence1_key],) if sentence2_key is None else (data[sentence1_key], data[sentence2_key]) ) - result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) - - if "label" in examples: - if label_to_id is not None: - # Map labels to IDs (not necessary for GLUE tasks) - result["labels"] = [label_to_id[l] for l in examples["label"]] - else: - # In all cases, rename the column to labels because the model will expect that. - result["labels"] = examples["label"] + result = tokenizer(*texts, padding=False, max_length=args.max_length, truncation=True) + + if "label" in data: + result["labels"] = data["label"] return result - processed_datasets = raw_datasets.map( - preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names - ) + processed_datasets = raw_dataset.map(tokenize, batched=True, remove_columns=raw_dataset["train"].column_names) return processed_datasets -def train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device, - epoch_num=None): - progress_bar = tqdm(range(args.max_train_steps), position=0, leave=True) - completed_steps = 0 +def get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset): + data_collator = DataCollatorWithPadding(tokenizer) + train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, + batch_size=args.batch_size) + eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, + batch_size=args.batch_size) + + optimizer = AdamW(model.parameters(), lr=args.learning_rate) - train_epoch = args.num_train_epochs if epoch_num is None else 1 - for epoch in range(train_epoch): + return optimizer, train_dataloader, eval_dataloader, data_collator + + +def train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device): + """ + Train the model using train_dataloader and evaluate after every epoch using eval_dataloader. 
+    This function is called before and after pruning for "pretraining" on the GLUE task and further "finetuning".
+    """
+    train_steps = args.num_train_epochs * len(train_dataloader)
+    progress_bar = tqdm(range(train_steps), position=0, leave=True)
+
+    for epoch in range(args.num_train_epochs):
         model.train()
         for step, batch in enumerate(train_dataloader):
             for field in batch.keys():
                 batch[field] = batch[field].to(device)
             outputs = model(**batch)
-            loss = outputs.loss
-            loss = loss / args.gradient_accumulation_steps
-            loss.backward()
-            if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
-                optimizer.step()
-                lr_scheduler.step()
-                optimizer.zero_grad()
-                progress_bar.update(1)
-                completed_steps += 1
-
-            if completed_steps >= args.max_train_steps:
-                break
+            outputs.loss.backward()
+            optimizer.step()
+            lr_scheduler.step()
+            optimizer.zero_grad()
+            progress_bar.update(1)
 
         model.eval()
         for step, batch in enumerate(eval_dataloader):
@@ -279,43 +168,46 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o
                 batch[field] = batch[field].to(device)
             outputs = model(**batch)
             predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
-            metric.add_batch(
-                predictions=predictions,
-                references=batch["labels"],
-            )
+            metric.add_batch(predictions=predictions, references=batch["labels"])
 
         eval_metric = metric.compute()
         logger.info(f"epoch {epoch}: {eval_metric}")
 
 
 def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=None):
+    """
+    This function is used to create a "trainer" that is passed to the pruner.
+    If epoch_num is 0, the function just runs forward and backward without updating the parameters.
+    This allows the pruner to collect data without parameter updates (for activation- or gradient-based
+    pruning methods).
+    Otherwise, finetune the model for 1 epoch. This is called by the pruner during pruning iterations.
+    """
     if epoch_num == 0:
         print("Running forward and backward on the entire dataset without updating parameters...")
     else:
-        print("Finetuning for 1 epoch")
+        print("Finetuning for 1 epoch...")
 
     progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True)
-    completed_steps = 0
-
+
     train_epoch = args.num_train_epochs if epoch_num is None else 1
     for epoch in range(train_epoch):
         for step, batch in enumerate(train_dataloader):
             for field in batch.keys():
                 batch[field] = batch[field].to(device)
             outputs = model(**batch)
-            loss = outputs.loss
-            loss.backward()
+            outputs.loss.backward()
             if epoch_num != 0:
                 optimizer.step()
                 optimizer.zero_grad()
             progress_bar.update(1)
-            completed_steps += 1
-
+
 
 def final_eval_for_mnli(args, model, processed_datasets, metric, data_collator):
-    # Final evaluation on mismatched validation set
+    """
+    If the task is MNLI, perform a final evaluation on the mismatched validation set.
+    """
     eval_dataset = processed_datasets["validation_mismatched"]
     eval_dataloader = DataLoader(
-        eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
+        eval_dataset, collate_fn=data_collator, batch_size=args.batch_size
     )
 
     model.eval()
@@ -331,198 +223,128 @@ def final_eval_for_mnli(args, model, processed_datasets, metric, data_collator):
     logger.info(f"mnli-mm: {eval_metric}")
 
 
-def get_dataloader_and_optimizer(args, tokenizer, model, train_dataset, eval_dataset):
-    # DataLoaders creation:
-    if args.pad_to_max_length:
-        # If padding was already done ot max length, we use the default data collator that will just convert everything
-        # to tensors.
- data_collator = default_data_collator - else: - # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of - # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple - # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). - data_collator = DataCollatorWithPadding(tokenizer) - - train_dataloader = DataLoader( - train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size - ) - eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) - - # Optimizer - # Split weights in two groups, one with weight decay and the other not. - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - - return model, optimizer, train_dataloader, eval_dataloader, data_collator - - def main(): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") args = parse_args() ######################################################################### # Prepare model, tokenizer, dataset, optimizer, and the scheduler - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() - if args.seed is not None: - set_seed(args.seed) - - raw_datasets, is_regression, label_list, num_labels = get_raw_dataset(args) - - # Load pretrained model and tokenizer - config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) - tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) - model = AutoModelForSequenceClassification.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - ) - model.to(device) - - processed_datasets = preprocess_dataset(args, tokenizer, model, raw_datasets, num_labels, is_regression, label_list) + # Load dataset and tokenizer, and then preprocess the dataset + raw_dataset, is_regression, num_labels = get_raw_dataset(args.task_name) + tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_fast=True) + processed_datasets = preprocess(args, tokenizer, raw_dataset) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"] + # Load pretrained model + config = AutoConfig.from_pretrained(args.model_name, num_labels=num_labels, finetuning_task=args.task_name) + model = AutoModelForSequenceClassification.from_pretrained(args.model_name, config=config) + model.to(device) + ######################################################################### # Finetune on the target GLUE task before pruning - model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, - model, - train_dataset, - eval_dataset) - - # Scheduler and math around 
the number of training steps. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - else: - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - lr_scheduler = get_scheduler( - name=args.lr_scheduler_type, - optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, - ) - - # Get the metric function - if args.task_name is not None: - metric = load_metric("glue", args.task_name) - else: - metric = load_metric("accuracy") - - total_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps - logger.info("***** Finetuning before pruning *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") + optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, + model, + train_dataset, + eval_dataset) + train_steps = args.num_train_epochs * len(train_dataloader) + lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, + num_training_steps=train_steps) + metric = load_metric("glue", args.task_name) + + logger.info("================= Finetuning before pruning =================") train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device) if args.output_dir is not None: - torch.save(model.state_dict(), args.output_dir + '/model_before_pruning.pt') + torch.save(model.state_dict(), args.output_dir + "/model_before_pruning.pt") if args.task_name == "mnli": final_eval_for_mnli(args, model, processed_datasets, metric, data_collator) ######################################################################### # Pruning - model, optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, - model, - train_dataset, - eval_dataset) - dummy_input = next(iter(train_dataloader))['input_ids'].to(device) + optimizer, train_dataloader, eval_dataloader, data_collator = get_dataloader_and_optimizer(args, tokenizer, + model, + train_dataset, + eval_dataset) + dummy_input = next(iter(train_dataloader))["input_ids"].to(device) flops, params, results = count_flops_params(model, dummy_input) - print(f'Initial model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M') + print(f"Initial model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M") - # here criterion is embedded in the model. Upper levels can just pass None to trainer + # Here criterion is embedded in the model. Upper levels can just pass None to trainer. def trainer(model, optimizer, criterion, epoch): return dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=epoch) - # We provide three usages, set the "usage" parameter in the command line argument to run one of them. + # We provide three usage scenarios. + # Set the "usage" parameter in the command line argument to run each one of them. 
# example 1: prune all layers with uniform sparsity if args.usage == 1: - kwargs = {'ranking_criterion': args.ranking_criterion, - 'global_sort': args.global_sort, - 'num_iterations': args.num_iterations, - 'epochs_per_iteration': args.epochs_per_iteration, - 'head_hidden_dim': 64, - 'dummy_input': dummy_input, - 'trainer': trainer, - 'optimizer': optimizer} + kwargs = {"ranking_criterion": args.ranking_criterion, + "global_sort": args.global_sort, + "num_iterations": args.num_iterations, + "epochs_per_iteration": args.epochs_per_iteration, + "head_hidden_dim": 64, + "dummy_input": dummy_input, + "trainer": trainer, + "optimizer": optimizer} config_list = [{ - 'sparsity': args.sparsity, - 'op_types': ["Linear"], + "sparsity": args.sparsity, + "op_types": ["Linear"], }] # example 2: prune different layers with uniform sparsity, but specify names group instead of dummy_input elif args.usage == 2: - attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - - kwargs = {'ranking_criterion': args.ranking_criterion, - 'global_sort': args.global_sort, - 'num_iterations': args.num_iterations, - 'epochs_per_iteration': args.epochs_per_iteration, - 'attention_name_groups': attention_name_groups, - 'head_hidden_dim': 64, - 'trainer': trainer, - 'optimizer': optimizer} + attention_name_groups = list(zip(["encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + + kwargs = {"ranking_criterion": args.ranking_criterion, + "global_sort": args.global_sort, + "num_iterations": args.num_iterations, + "epochs_per_iteration": args.epochs_per_iteration, + "attention_name_groups": attention_name_groups, + "head_hidden_dim": 64, + "trainer": trainer, + "optimizer": optimizer} config_list = [{ - 'sparsity': args.sparsity, - 'op_types': ["Linear"], - 'op_names': [x for layer in attention_name_groups for x in layer] + "sparsity": args.sparsity, + "op_types": ["Linear"], + "op_names": [x for layer in attention_name_groups for x in layer] } ] # example 3: prune different layers with different sparsity elif args.usage == 3: - attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - - kwargs = {'ranking_criterion': args.ranking_criterion, - 'global_sort': args.global_sort, - 'num_iterations': args.num_iterations, - 'epochs_per_iteration': args.epochs_per_iteration, - 'attention_name_groups': attention_name_groups, - 'head_hidden_dim': 64, - 'trainer': trainer, - 'optimizer': optimizer} + attention_name_groups = list(zip(["encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + + kwargs = {"ranking_criterion": 
+                  "global_sort": args.global_sort,
+                  "num_iterations": args.num_iterations,
+                  "epochs_per_iteration": args.epochs_per_iteration,
+                  "attention_name_groups": attention_name_groups,
+                  "head_hidden_dim": 64,
+                  "trainer": trainer,
+                  "optimizer": optimizer}

         config_list = [{
-            'sparsity': args.sparsity,
-            'op_types': ["Linear"],
-            'op_names': [x for layer in attention_name_groups[:6] for x in layer]
+            "sparsity": args.sparsity,
+            "op_types": ["Linear"],
+            "op_names": [x for layer in attention_name_groups[:6] for x in layer]
         }, {
-            'sparsity': args.sparsity / 2,
-            'op_types': ["Linear"],
-            'op_names': [x for layer in attention_name_groups[:6] for x in layer]
+            "sparsity": args.sparsity / 2,
+            "op_types": ["Linear"],
+            "op_names": [x for layer in attention_name_groups[6:] for x in layer]
         }
         ]

@@ -534,22 +356,22 @@ def trainer(model, optimizer, criterion, epoch):

     #########################################################################
     # uncomment the following part to export the pruned model masks
-    # model_path = os.path.join(args.output_dir, 'pruned_{}_{}.pth'.format(args.model_name_or_path, args.task_name))
-    # mask_path = os.path.join(args.output_dir, 'mask_{}_{}.pth'.format(args.model_name_or_path, args.task_name))
+    # model_path = os.path.join(args.output_dir, "pruned_{}_{}.pth".format(args.model_name_or_path, args.task_name))
+    # mask_path = os.path.join(args.output_dir, "mask_{}_{}.pth".format(args.model_name_or_path, args.task_name))
     # pruner.export_model(model_path=model_path, mask_path=mask_path)

     #########################################################################
     # Speedup
     # Currently, speeding up Transformers through NNI ModelSpeedup is not supported because of shape inference issues.
     # However, if you are using the transformers library, you can use the following workaround:
-    # The following code gets the head pruning decisions from the Pruner and calls the _prune_heads() function
+    # The following code gets the head pruning decisions from the pruner and calls the _prune_heads() function
     # implemented in models from the transformers library to speed up the model.
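A condensed sketch of that workaround, assuming a BERT-style model: `get_pruned_heads` is a hypothetical helper that reads the zeroed head indices out of the pruner's masks, and the `model.bert._prune_heads` attribute path is an assumption that varies by architecture.

    # Hedged sketch of the speedup workaround described above, not the exact code in this patch.
    # transformers' _prune_heads expects a dict of {layer_index: [head indices to remove]}.
    heads_to_prune = {}
    for group_idx, group in enumerate(pruner.attention_name_groups):
        # recover the encoder layer index from a name like "encoder.layer.3.attention.self.query"
        layer_idx = next(int(part) for part in group[0].split(".") if part.isdigit())
        heads_to_prune[layer_idx] = get_pruned_heads(pruner, group_idx)  # hypothetical helper
    model.bert._prune_heads(heads_to_prune)  # attribute path assumes a BERT-style model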
     if args.speed_up:
         speedup_rules = {}
         for group_idx, group in enumerate(pruner.attention_name_groups):
             # get the layer index
             layer_idx = None
-            for part in group[0].split('.'):
+            for part in group[0].split("."):
                 try:
                     layer_idx = int(part)
                     break
@@ -564,32 +386,25 @@ def trainer(model, optimizer, criterion, epoch):

     #########################################################################
     # After pruning, finetune again on the target task
     # Get the metric function
-    if args.task_name is not None:
-        metric = load_metric("glue", args.task_name)
-    else:
-        metric = load_metric("accuracy")
-
+    metric = load_metric("glue", args.task_name)
+
     # re-initialize the optimizer and the scheduler
-    model, optimizer, _, _, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset,
-                                                                         eval_dataset)
-    lr_scheduler = get_scheduler(
-        name=args.lr_scheduler_type,
-        optimizer=optimizer,
-        num_warmup_steps=args.num_warmup_steps,
-        num_training_steps=args.max_train_steps,
-    )
+    optimizer, _, _, data_collator = get_dataloader_and_optimizer(args, tokenizer, model, train_dataset,
+                                                                  eval_dataset)
+    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps,
+                                 num_training_steps=train_steps)

-    logger.info("***** Finetuning after Pruning *****")
+    logger.info("================= Finetuning after Pruning =================")
     train_model(args, model, is_regression, train_dataloader, eval_dataloader, optimizer, lr_scheduler, metric, device)

     if args.output_dir is not None:
-        torch.save(model.state_dict(), args.output_dir + '/model_after_pruning.pt')
+        torch.save(model.state_dict(), args.output_dir + "/model_after_pruning.pt")

     if args.task_name == "mnli":
         final_eval_for_mnli(args, model, processed_datasets, metric, data_collator)

     flops, params, results = count_flops_params(model, dummy_input)
-    print(f'Final model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M')
+    print(f"Final model FLOPs {flops / 1e6:.2f} M, #Params: {params / 1e6:.2f}M")


 if __name__ == "__main__":

From b46bcee893d947acc5a5eb3e110bc84f27b6974b Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Sun, 18 Jul 2021 23:58:51 +0900
Subject: [PATCH 39/63] docstring

---
 .../pytorch/utils/shape_dependency.py | 24 ++++++++++++-------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py
index 7184587890..9828253b5d 100644
--- a/nni/compression/pytorch/utils/shape_dependency.py
+++ b/nni/compression/pytorch/utils/shape_dependency.py
@@ -566,7 +566,11 @@ def dependency_sets(self):
 class AttentionWeightDependency(Dependency):
     def __init__(self, model=None, dummy_input=None, traced_model=None):
         """
-        This model groups the linear layers belonging to the same attention layer in a model.
+        Groups the linear layers belonging to the same attention layer in a model.
+        Currently, we only capture weights in attention layers with forward computations written
+        as four Linear layers (projections for Q, K, V, and output) and two matmul operations.
+        The method implemented here can work for Huggingface transformers but may not correctly
+        capture transformers written in other fashions (e.g., torch.nn.Transformer).

         Parameters
         ----------
@@ -575,7 +579,7 @@ def __init__(self, model=None, dummy_input=None, traced_model=None):
         dummy_input : torch.Tensor
             The example input data to trace the network architecture.
         traced_model : torch._C.Graph
-            if we already has the traced graph of the target model, we do not
+            if we already have the traced graph of the target model, we do not
             need to trace the model again.
         """
         super(AttentionWeightDependency, self).__init__(
@@ -583,7 +587,7 @@ def __init__(self, model=None, dummy_input=None, traced_model=None):

     def _get_parent_layers(self, node):
         """
-        Find the nearest father linear layers for the target node.
+        Find the nearest parent linear layers for the target node.

         Parameters
         ---------
         node : torch._C.Node
             The target node.

         Returns
         -------
         parent_layers: list
-            nearest father linear layers for the target worknode.
+            nearest parent linear layers for the target worknode.
         """
         parent_layers = []
         queue = []
@@ -614,7 +618,7 @@ def _get_parent_layers(self, node):

     def _get_children_layers(self, node):
         """
-        Find the nearest children linear layer for the target node.
+        Find the nearest children linear layers for the target node.

         Parameters
         ---------
         node : torch._C.Node

         Returns
         -------
-        parent_layers: list
-            nearest father linear layers for the target worknode.
+        children_layers: list
+            nearest children linear layers for the target worknode.
         """
         children_layers = []
         queue = []
@@ -644,6 +648,10 @@ def _get_children_layers(self, node):
         return children_layers

     def build_dependency(self):
+        """
+        For every matmul operation, find the immediate parent and children Linear operations.
+        If we get three parents and one child, add these four weights as a dependency group.
+        """
         self.graph.unpack_manually()
         for node in self.graph.nodes_py.nodes_op:
             layers = []
@@ -680,7 +688,7 @@ def export(self, filepath):
         """
         Export the group dependency to a csv file.
         Each line describes an attention layer.
-        output example:
+        Output example:
         Attention layer matmul op, Group
         """
         header = ['Attention layer matmul op', 'Group']

From 8f8313173ffa83635996501e191830a76b383533 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Mon, 19 Jul 2021 00:08:18 +0900
Subject: [PATCH 40/63] debug

---
 nni/compression/pytorch/utils/shape_dependency.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/nni/compression/pytorch/utils/shape_dependency.py b/nni/compression/pytorch/utils/shape_dependency.py
index 9828253b5d..883b731d57 100644
--- a/nni/compression/pytorch/utils/shape_dependency.py
+++ b/nni/compression/pytorch/utils/shape_dependency.py
@@ -567,9 +567,9 @@ class AttentionWeightDependency(Dependency):
     def __init__(self, model=None, dummy_input=None, traced_model=None):
         """
         Groups the linear layers belonging to the same attention layer in a model.
-        Currently, we only capture weights in attention layers with forward computations written
-        as four Linear layers (projections for Q, K, V, and output) and two matmul operations.
-        The method implemented here can work for Huggingface transformers but may not correctly
+        Currently, we only capture weights in attention layers with forward computations written
+        as four Linear layers (projections for Q, K, V, and output) and two matmul operations.
+        The method implemented here can work for Huggingface transformers but may not correctly
         capture transformers written in other fashions (e.g., torch.nn.Transformer).

         Parameters
@@ -650,7 +650,7 @@ def _get_children_layers(self, node):
     def build_dependency(self):
         """
         For every matmul operation, find the immediate parent and children Linear operations.
-        If we get three parents and one child, add these four weights as a dependency group.
+        If we get three parents and one child, add these four weights as a dependency group.
         """
         self.graph.unpack_manually()
         for node in self.graph.nodes_py.nodes_op:

From 941a30134abeea58befa380b76c9459d1dd11fa5 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Mon, 19 Jul 2021 00:46:39 +0900
Subject: [PATCH 41/63] doc

---
 docs/en_US/Compression/Pruner.rst  |   9 ++++++---
 docs/img/transformer_structure.png | Bin 0 -> 193880 bytes
 2 files changed, 6 insertions(+), 3 deletions(-)
 create mode 100644 docs/img/transformer_structure.png

diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index 2ca4a1b516..e2a10c3c10 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -727,10 +727,13 @@ User configuration for Sensitivity Pruner
 Transformer Head Pruner
 -----------------------

-Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__.
-Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contains the projected results for all heads. Normally, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Please refer to the original paper for more details.
+Transformer Head Pruner is a tool designed for pruning attention heads from the models belonging to the `Transformer family `__. The following image from `Efficient Transformers: A Survey `__ gives a good overview of the general structure of the Transformer.

-Therefore, when an attention head is pruned, the same weights corresponding to that heads in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. In our implementation, we calculate and apply masks to the four matrices together.
+.. image:: ../../img/transformer_structure.png
+   :target: ../../img/transformer_structure.png
+   :alt:
+
+Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. The outputs of the former three matrices contain the projected results for all heads. Normally, the results are then reshaped so that each head performs the attention computation independently. The final results are concatenated back before being fed into the output projection. Therefore, when an attention head is pruned, the same weights corresponding to that head in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. In our implementation, we calculate and apply masks to the four matrices together.
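As a concrete illustration of the paragraph above, consider masking head h in a BERT-base layer (hidden size 768 = 12 heads x head_hidden_dim 64; a sketch with assumed shapes, not the pruner's actual masking code):

    import torch

    # nn.Linear stores weight as (out_features, in_features), so one head occupies
    # 64 *rows* of the Q/K/V projection weights but 64 *columns* of the output
    # projection weight, whose inputs are the concatenated head outputs.
    h, head_dim, hidden = 3, 64, 768
    rows = slice(h * head_dim, (h + 1) * head_dim)
    qkv_mask = torch.ones(hidden, hidden)
    qkv_mask[rows, :] = 0   # same rows masked in the query, key, and value projections
    out_mask = torch.ones(hidden, hidden)
    out_mask[:, rows] = 0   # matching columns masked in the output projection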
The pruner implements the following algorithm:

diff --git a/docs/img/transformer_structure.png b/docs/img/transformer_structure.png
new file mode 100644
index 0000000000000000000000000000000000000000..bd3fcc78b3de8445663f8a691e2de31e3aea309f
GIT binary patch
literal 193880
[193880 bytes of base85-encoded PNG data omitted]
zvMvl0(ymN}cc@O>ft>eRize9A!D+I@5`&SrapIV_Jg$&1Md0^)M zaCO#yQGH(*R}m2@m4>0EySuw%q`NzYjsaAG9tpZod#_{<+LubF+% zJ@?$T*Iw^oB#LRj;AQyolg;(@ZZ|2zp9VK|Kz5sY)lE=S6Q_#;N&ef}FZ-C4V*oq* zd|@HE>pK&01+yBMn*~@f=#98i(tX_CT9QiFiSHcE~@zvmNzQn-l z&@WpdW8IMYr&L<7ZP&4o%i#NP0et>WiSRr0r>FidA-o{xvzl+O-8*(%lm(# zNgAfWNa<21aMdpG(}x$u%02fr#Za~Vn zaQv{K!GAK`9QpgWqLSo!+ILrFCwJEpU()w9qayikzW>sl2$*jeey>> zMy2$AtAf<$;8^|ZyUjnR7vzn|LqM{sv~>NM{5C_f9lq7Wk01gMmY4-rLe z7VJ&jT?IhSYNPQo>8u?*jyK&Y_Vwtz%|6%jX7!h&`<)-Bf!B?x%jolldk>O!i9N4W z+64x23lD6*vDdy_SIG~A2oJYhxBb>B?QS_OTxV_nDUKEhQ=<;#aKX&<*ab<9_#zoM zpX%SPuAU{a$2Q zSFtl?6cL3@=*PEj+?QLNUEkX3c#O%}2Ibl1C991s|JFb}0k7-Xc9Oo;EjuSK zKDHMx$i}9ci51%$NmMLgBB!dVsIW2eDfTP9b5!UgRCPR^8+_;{<0fDZ1CYV@0Q0?p zLu=NWh=>T5$tubq!ec%L((}GnJr)hhJ)FGvjIH>g=S_iA(!NF>C*w)l8tYh>X?jw@r3@uOm1+TsCiLzsP$9Y5D?|;mauxaMh7&6aIQ&SR! z^$>I0cY?k*bW!}zZ_orUiKF-)`H;STD&{J&4&-5p1W{0~Bj|bD5T1DaQ+O!|X!AIa zG6@Y06H5;?9<@ELVdk_nqPP7yCdatibgiYC%qIPMPO(3(I9SyFuoa-1lra>utUKbh zD}3Tr%>2xIKaP(iO-+-ppVtKcUwgQIE@trn|M*mw}3=RT&3|GfgDe zr@jS)_(Yh8hezt(beTFbYUE8ur>N5URj|JWD;6R=J`c+ z){m|_I=cG{MdLMnaO0e9VoU2(djG6K6?Y4c>4U7LpPp!5T(+zP?r$~PkDLeE691al z?ay@;A8@DmoV2+=KXBG~pXu6#Ltup=0tMqwu}d!rzXR+AN2ubm?&mU=QXa-O!!I3w ztMKMzw!=3;wr~E7bh^Dm{bTQV&%48S)nj|*SXa=Zmd|mFtp_#7Z51%a`MMw05s)&d zO+CEUlC>4GcGm0OWkwfNrt!MSwp!d7!`uIU(_y4lleG1->r3zZ)@Z?3waO)@x37=m z@1e)^_?4Pj!%?pt>7#jq+d0ZU^E-5G`GTq4F+N-%+1D=8XKs{zPGfGJj(uDPpRQ1A z`*g8rziyfZA%538aaWkU`Rajwd&0KETv2JOmQTveDs#s1>JXZeHjpA^wmf*757l=q zyp62|oh$?7fk1ZaSqr3Teqfhh48jsW%%%%k%4%!`*@c33F)%QIunc)|aq%>t<+$B0 zfUwYD4J4>0NMWF%QBlJRMGZ20Hm~kwGTTC$7+y*XE~dKgXR|rR zF)=l;%k8Gu|oJ~63n7be5{;t&>bB(lMIg2ewx8g=K3WK!4)EiEk|0jS0mHEl>_q#cKzcF^6O`%S1@A!uA$o#+DQ>vXII*&Wcw6_&3^Wv^J))-0Nju-l{CaLWF^uSl$Df#IjpJF%x4X=>p*bMh)lT`{LS z$KAeX#LxG4Id=Y67tKFN733Gi>Jbbp}OpSrZXDzdrUJQ zE>%rz?_&gSv6ZwerIvd?$%Z+~42$W`Et0VEAt;a>>_L6zljUl3o)q$8Vb(rdZV($& zTiHcgbkPTP4Gm6)PqdS3i^-i&OtYWIRm_FOJ*XS5J&$=t_yU=^H<$z&HAH2kRsGUX zgiROqNAw#dwd# zpYX8W+KH&nzI7!W%-{0wjK=UcAJ3ErKr}b)KESA2ettg4%XK^?74#KdWf0SQ$Tzbh zaXFtKG(Y!-G%3v6AfY zOpjdp66cobo4mW`krbt1=&t8{s@WAt>V`6v)c~zDWHR%GG}_-%CA2E zvn2HK!9skS_UEgwoo2df-0l%u z{j>;5tyeCYp4+CYFKcP-7f*2QHj93-7R(8^EhlT6-n=2v;U zGs8F=ITM$w6yKWNsd7$s9jUK+o>S|kU1OcLKYv8)Y70gN(~ig?hb>WK zm^O=U9(neAs5}SB-VA#r8p&>ZjJAlCiSA9;Z-MZ(yVvID z3+k)0`aT^oRu{tI4#m}eA$5GZ$m=J02#zBoqb?_oB)+9C<0LWxYzhOdvr4w2w8!Jyx2NpGKT3Jug1ikV^4btAbmy?d&rr$ehHVa4(kYfU4_2wXoKH8w24S5;YJmP zHSZnpwiq5b z7G@?O1~~YS`%NxnOQY&E7}i?CXUG-Y7qr7V?L@Un;5DcfIClz^<85^h4f#`s1vaK! 
zJX@pE#tM>jhscxk`x?M)5ce0G#BV0m&exu!D$UCIiByNcUHdizXDJ}=Mju7Zd4 zlejr4J-e#591ACJnkV^ZS{|Ol+2#kgBE=`AKs18k6oSkZ&q@2=QW7Zm zut8zJkTB%w_j<>2MqTO_`hm#!1M@M)xT{@6{H-3FQx#|vov0gpEQyGXs~tEzM(;KQ zugM1WTC+d5)H{+1%fRfD|i$R385S#Y{D*85e;1|E3Nx?>5pU6ZUR4~;T z&i+_Qb8+#MuXB9Zm$sHRH!4oK&`Npmuwj76MCaaR)5T0$o%9u6UjR68a%D>GDgl`n zUgeFetLrQjnUmpw9$Wu(wj#}pofgGz3=y3-cc4FKttMmomr-d{3C#3Jqmy7s@BDoW zPd559t0gmV8}Inu^{v%gqT_Sg@hKGZQ(DDoJUD%=gbQSKR^nq`&+!kGoB&AO4534& zeFx9&=VJpP4heah%pO925Y zYUI4OHe21_G-|Bb8!jF$1KNNUGjtZ%7!?Fd~CW5kZ@FWSjwxeg|653>=In>lMomDsw9=0pE%?VnD8U3F86`&EB_1 zZ6HUk%itS7^mroUEMXZKm_s0R|6!ixjm6096T9rXWcwdx37L>jLp%6-nW>w?5&7JV zcOg+n2%-w(E$TF(9d_lNDvAWuV2@-Z5Ur(Tp)ZQ;tOuTH#= z*=P|l9Kk7QB~f=vd{EhT`r-w_`3>cpg7U( z`1v>p@1u*JGf&11Aogh>QiXz5bVv_^^{V(!&2nwk#yfIZiPvI65YS>%Vjw4A;3~%b zr9!1``ua)yQCUA_q^C*$9}LB;Hv%MPlO1>SkD`b_u+4oy}pf& z4RQ_Ur%f?xrBC7rJ*%scK5!4~bb8-q{O>4h<+fz`C^!;BFC68G?d%0(3UlGeDv~fW zk~Ahr62MijT6O7Psb8pnYCyie+#R~n(9ni_^(&dJwNa*8#JjJgqoFd;7y0$@vCx8b zD+oceDSo?JWI}H`rsV*;aS_F!*`5KP6GYtB9D2En@h3Dp#?N8CEg}CGDa|#!jaFv3 z8>wIqYVy7wb>&2nbx$rBJVV5K*ke7WEYPNSTXM-+R1hSXO{V6WktMtJQxfXwY9DIBtd9p zWj*+3czvlw|MEUvk&2Pgc*iu!u#8To_1gu35rc^O*QPBmmS%0v3W3wLrqg9_SsIMd zzZMCV4~D>b+3=TcuSR9_6^q^Tt=tp3kHOf*vq{n{-c6Gr)(Z^x$TPYk!o`HFU~bb-Yj3{G{7Eu z{umoDP-so1FuL4w*|O7$-}k%gS9so^)T|s-W-c~n`;VdENvbU=>&}~Y`vpx+4J{3e zsfPFhxm__-i|rnCG`=UM1k8lH#}C1e``ODaRFS~(Pu|Rx0ykw}hszd^k?%IWF*vN0 zOgHK)vjIi~w`;Easm!gq!o8xr^!o2k@An_zzkJ1-UB@bW1X{2jyBThG8)1xqY|d%J z@0PDW3M35wha|oLXedONO(hn(TlHt&K(Yz&p115Wr<*z~AbwbFyTno1ej$TJr;*+A zc8~5ql-6NoTO<-g^N#@{Uyz%d8!R(&9MDb_jf_YY`Of`!s+u-)C~w<*A-gZqd>+G6 zr2JPP-CGoUpU$X3>(h-5pSN6pX4Vvlsi{Lsz(tP#e!s2YPX{b4v~7yTWSd2j?#=^m zD$gI{s#pdbZsm=GFKV{MJlug7M;%BXrWW^jmS7fSv%le_y!_(2-Tlx;=d?K~?D4}U z)P}2q^0DZ##e7qUu+{k8M5pW18i5!J=lTs zTDgupTm_W4F}4$dkIO_%XpUW^z<5RlxjRzYsNS&A=r9V{;gx+jSD9tWz`l<;n_KhQ zY&-4RMVo-c(qhtL$j2ZROVM7I^}W$`4J@$d5yuv5zm;#b(AER%R?z=TjmxN@FFZI| zpU8RdK#uBW2T;Qnx_rADqo)YmmUDX7)x<(z$BO1(f=+q30x0e96#SELPSv+-%qI}q zAA=&PU)>1uNcpO{*Ij0BHkhWj>2^vGe93utnFyN%1_;R7HccuWdlvVuCmO_WIH;LX;LlbVpZ$Aj zZsuvy_#EFUKUc0fkh5hnv0gcmUg`E8z{%jxR;?AYTfIKa5fBWqh;iIasvAS0S*V=V zC&3#I;^O4{lC0+!=dk|S#wp)s3q_zM(APmXpxu7A=y=vp7$FwU~FmI!TKZSLvN^9NZ*A~9J_m%lFz8B za&d7va|oS!(lK)ThDKlAtgQ{z9>O=h4&l^qgWa^+&jeEspp=J<%`dzVNEECbsz zg@Hb(;NR7<)3~K+W$!v*Nc-=yU<8?3>!~{?C+K%{b6R>gpO0ByokRY=L%8Pbi|!<1FQ&y>Smw5l$P`p~OIzS91cZdDlUyuQSE z+-vSkXi~|H%f?JuhHLKm_Cvpz=ih8JT1YcF6{I;}<17@j%>A|lZNACAivzv}60jO6 zc+0lv#@u)G6;%-_Za%(%b?wV&AD3Kz#ZoEqKO|q^pKI6Ii^pm zX0b#WP81afhS}h2n*Ar}Wtp{%4h@ycHmKJ=?@3`LhH{8aFI57?FQuWa`Mb}flcBcZ zuq)nreK3(1M8VZ@)`PU{d03(4t3Sa5{GaFjJ7BGw5d9Ns!S5DOnBT)&y7Z)R`pTVb z9AaP1sW6K*Ku24u&*D+{U|UK={v%{09cCMta071Uv7l0pwIVB$M$;ndJs zjY>P|2!SXQ&Zo{CWEk4L@^+b2KL@Z8*p(k!4|38}-S&IGy_5 zo32-Z6Bz*y2{m=a5smp7= z!%H#f>ntx`tqs7~4c9B?!H4$!R$fO4qQS* zKYB6_i9($2-M_S^G5G1%L=|=S78zF{eH}Icx$h#{c_`* zU4a#JTaI`=v+tD4lF(Xm*JXNT=*xPtTow-F5S|1^+@G_L^{^-I&qWN_UF!HOM9ZBB zsuzLN-$^MaYaeabMRas@G+L}?n0-fF8yn@)xs=m*=(5Y>O0}V1EQ@{C75$dU$Fii(j>xaSC%`xIK6I;CY~1X{l54= z2~p7l{pvb)*><>E82laHv_MjnCZ`6c+~reGrS&oW?qkT_n#?T~xH{?ysga0yV6{A;&vp zt}3L-8Fg@UZH$L~*H}|XV)JZ4%rRHEwS8M;$+I7%k%bc)9xV`rPZyNcX-&h=PrGol z4Q{WWjj}#z%bbWoEq9mFM9o6Y12#Ip__&0hNk#aHYDIR@iFBGneMx$eV?F7&dOrSx zXLiZdhheNDTHSWbjcLAuPhYXHc+@JM;`a86F9oQSHyq|fIm{i`GYE3Qwx|Dubk;g6 zPW-e($5lSY!D*U?6zm-hzFk(G(}6*m-y{%;;|IZwRlk6t z308YtnpJef=aw)dS8TBlrBF6xK8NPiy={wS0Z4h>Bg~Q|c%b^Ccw-59b>WjJ~{9 zj!kFVV8+_nSFMvaJ0|L?QJ@Qnn)MYZ_%`|6jGCdDmhWQ!d5U@FZpyBn$@E_zzQ;jb zjm)AC<#l8gj&|*b%&y1sTV;%b+mo^kJg1C$z#&53WS3IZS>~|UexG%uKA|fzDJj7c z+*sczv_d3KX+FBxpu_MX+mU##o|pLf{i@HBZEEqPa}9Tkk}hC4dR?zlXjC2Y)R@3! 
zrhsUWnvefME4JIfjf#xmuS)dJ&;=#-tXNld6lx-?hw@-xEWlB!qfMOd@$|&zQeRJh zSB2aeyc2KHY;I|>s>{dBq5lFQ#z2#OAxc@O-N3K&m3Ytu9|;l>rIBOHiX)PtS4qPdS`=qn(Yl zojPsz#kCwJ(ne{Q=t9hvF9&7IHR@8D)}Rq-_MJI1^gQG7kyNO%WjC;(sq<^3zlBhm znC+wJPBK&2tQryVL0h%GOaP>YlH_LuKBv`sSyk8UV)g)$7c%1n;m}*lj=f$Y8HL#o zSl*kPHMNcXuoYeO2|Rk`tu+;c*jb|8HeGe9F$`n$paRSk9VAk6>X7JargwgR<4Xe% zV|1vDnzjDGI*L7BYC`9A!0@Bg4Odj6N>rOQaxO4aa#p?CxTmJ2F*t2>p}W5^zM8?* zfli3?Jzq^Na^*Jcm2Wth7qZex6Sjz&un8E zSx-8Wh7msCzTepCH57JXNb7PBO`7%E$a@hBQP(b*^rAj;|0xo@ZYTM-f6qsn@AWo<6u&X? zeW?2Jx&6scbuU-g=gr5%FXP4g>P`z8Q3h6qeakIQ3$>=QMfHiQ!1qSV_%S_U`F_9I zF4e9wJ^119i|hmBeC2f~Hk)k3``M*a+c&wV6dZo;Vt1A7LvLKsRPE|U(PAot#)Szi zreOtL1%(&ThUJrjDS z$9#3xAjvSO5tB3)^UErJb*3SjG#tZ1YYFogGnx4MbUcPiT-?kXjU2AG_rfv{1PNiB z9Ia8+Y&LPfs_Ng{Vf-w(qkT=tF4YXS1)sF8-b?eJX+=NFtWetjO!<2e@Qy=`M-6Dgf}6jRoNI5cI}Y(F&^8Vi(w zdZ^_#g5vDJ&D%F+Da+&4cj+PU3c_$LSDC&}mW+xv^t~EupPx2yb55X;K) zR3Xqi#I7n9iQD_PlYf6|^o!FJPvp}zTvp{+PHCLhm;V&p689Z6LN1|)iM(C9&-M$= zF(%|}w5>R)Og5k03TP1ndNj_iE_w#WPv?&)jD>wZ$A(R|c)6#m6UAr5Y!4?{ix@a} z0bw0M#%7hv`#5B?jjmTjPH#C}Q33P?QEC}^fJlx~Jt|L@u#V?3dH6AUa9qK{TG)8X zp2G>n&5A1=FaTH66zw&Fn0oVZe3d0s$?cE=X$F|gzHSc=T76B$t~~gem3MHNj+{q_ zyU$ybpC8}47>GHq9oKp_I8euj#SR;4NI~3JLt_1e_0C$=y z;y6TbcI(!&suh$F0BWBG$9ToZw_9nwe!|#A-$$x0FPFPaDYGERXV1GTu`sqC?(~1} zSXVc`pQi*^Yyn!&(4@>n0h1*uY6%+ACu0#USl0N=(m3{oC&3J9thu0_`g(^ji5SgooaO!3 zCFJNJH|Kq$t6!aLKN|n4&`0JM<*#Y7+A;1LO914!y)Q@p5fl`(siJ>&D#AOtl1A_- zf5D~KOySaGhTzVQ30x5nDmGV2V6+Nlfk}58(ZzJxrKYz5f1TcIvLdI6ll$mx``JO!Q*%@7;v!THdXEx@W?FlF z3*Uaf*v9wpM7R3jgB~*nQjnbl}DJ zvO%YJzoE!c$f?CW7u|Ys+dFuH9R_4Cj(PzV0Dm+}fkxordZ@iRfi-Cnj zb6$fP6ZR+LPg-Moi<(>U6P5f_wC%-i(CdlaR= zZ{WZHgT(4+H}ltCNhc2Q9$t6xIT;SRZQtts8jHO>E_*EzIHl)Tl#hW&4z0Ijq)ETO zk66m_c3D(nbzL)72UnYRHK!@9ui4A03L9+qz!_`&KJL4uU^|_>_<&TKR3A4ff>{83 zz|ccBN_3b--_@RQ#okAdjEoAwyMSE8h?hWe#|b6!&(z`YEy3{@ZAg++lk+X!yqH6= zrF~Rk)&Zb5C0f}^^~n2?=D=WCsuZ+SR9rlOMEz4h?ib+k&q9H^(z4N-!*z6@g01)D zkLIiMC-r$DeshiVe0}%V75NoTyUA z@P0(u>RH#zr(YAd9q$gun~&@4$i?My=GuI^)M#MODCOaAzzsPnJ?y_zK1bf0$6ju;w}pVmg7XhxB|Z*z`%**7trvX}YRomb1y&?+>bf;Q&o&6YW~r5%|Zzb9k{ zlWhNZ_eTR4D%k0W?-yb%BSn$WS~MCfZ%?42QS20h*?7uuc^~Fg7Dsp{q1vD*M(EfN zjNg2G{16u>@CKk)zu#69F)Rnr7nVqv_+A}uu8{7z@u=`CXBJkL=_9=V6#y3&gl2v) zU9!vnU+5)1KE7xr#;47Gs+lid2ne~xc{L3UF?Bz+2+%Y5J4x2Bh}fc~%$`?a3l=t3y6=PUTyB!u&mIQa&t@sx?`}Bh-ibY){)dH{q>XB5-;Ta62-K_ig9wc&6uE=6M|T>+){ z6rQW5B&(|dop)|ea0oIv59u4lv`uVHD#7aXakYIM-(yuB%+#T#OSf2EZ|Do=ZeT_R zeaGWf|DOeE?>-v@ykRbGzK8Xx@CTt_U<+ww-tP*^}bpalbayEwZ?P()la<_4mo zq6Kv13}H8u8&H}d*?J|FbT^weiO@jz04@kJeU%m)@L*6Ip90zvF`WXTFdLV zR(`+4OVZX??U|HueVOg88B`%4R*sJ3=WMPsl2PfkO>X@R7^EmP)LJ~?j2%LFr=!q zPSm|zE;F4R*tndpXT!>Tgx)1DU)o?Gk#eW+j~=Ni9B_$uF#LB8OG%YN&}~Dnf>odX+mWcoaPy#LvVvA=oHm8|`P0<}4NToRk6d;c|3*4JvMxuK^e@-d zFYoRy7x-Q$i$;dS{vF7A8rj)}wn!3HwsFNSubEx;1PPuka5{M$UMr@=9gX)TT~iJ& z9$|0}U}P0L&^5je1=V4nT{19#=CS4<#)x`LhO5$%%DI|3ijd_ykY*1Zk;Iq(c-53m zT$TLLs^a?0m$hwnuERo#qn?4=*xAuy6UvByO=qHN*3ej;Au@qm(@-~6p1*Z<2Z*}S zSxIg~Q4EtkT!MmKFhxTKM#iG@@AW9E@EQkJ5ZC&C>GzTdnBVWr9jfd4d(Xrt7l+;< zJO&=$xTsm3*e*HF=_lQJUOxMP&AcCyPGq7ObH>#ihJ8oi405JlR-leGgS*)EgY* zFNr5iLgT;Z^)w~9X-is+2z&5$Tm`N z)#tZdW+&YzUMDbJ$D_kc1U550!yA1?`&wDkV2N?eKh zu*&Y-h3QXSKH%oVhnQZTtf3*zoV`0XIu+b96q?$)S_MmfC>3=GkotjYYe**^Q#+|* zRfbg4zzka*(?ahmh|;jksQmSb@`ENP9sy!Xa%!lp*`g~eE3O>#RO27_QKBkfmCz3I zluT(F*xCl<`9O%^;hlRIq58- z0#<`MfvB;d+X1cGOF{_c79T z$u*kcHUxPlpWbv`dpv|?L1-~XELPon=xJOmW$Psa?SGp~I7;3z;$h#999z0qx$0^s z%6hMF&kr|7d%xWS(L#+KplmhWmGkV%cJdYJh>_uD23=f0cLDaVllkf+WNHHxW_ltb z7WHO1)eb#*-gDB)z*hn#B(Er{0jF7e9sP?!wBG_mq#|T2V$ecAJ(5>%Yl1#lPdM^J z&fR4P6mRzNvj~rVhAl81j`u~5IZo~DIE?tOz)jFS4s6Rq 
zZsL;?;;MOnZftCz+NUfgL}RHqezLWx(z}CypRV$)n2K=@OYmTDP#9_}&yBTnJwFd{ ztU_7dfzv|^UG%Cz3^Y6>wW!)!2h|NThAR3wAu!#ED5w|N3OYFvPdq`mO~(2i8p=>yGfMK4Fb8WUVyb8eDCLm;9$u9kB)L z`)*lk!;k4T=h)mDSWmAFSVWCR-8Hf#hi*EOTOW)HSc^=iY<$*oDJ|{-kTcmber_@v z`b$l1pFwBs74q6u_--F$hH5$LcC!g$j2Y2QoiNWQ^fXckOdq4_q$jTnx&{xc|Dc$D z-36Oap;VU3A~g>h3} z$GwmW2)?*LIsRo%@9Jd$#&sk}AQv$@^hW$sZ)IiWf9=tS{d!DH%;`hg>YB#saskZH zDMK2;C*0BwjOpnnYH@S%htC|W6?u(?GMD`20`-~rfMaWMV2Z4yWXr_74E}X2rZwLK z`aEsh?s3}C4x!T-f6RK8){`c`o9oA;+7Tn2Z$=BF~uX^?K;&Wf6UBuHM&`J0}PuzSE2 zu~jdZNc$1@N_F1JU+4~>w->9&v){5&${fFqX6z5!C-|!q>?(Sry>;wR-|_SCWDN2B zR#VUrZDDtuA0h_{Suj4+kLa_H(01J#=L2am2WrcZlguRfz81HCqVpDZx zROzRaD5K*KM3nNI+&f3R1;R7Y%|2sO4h;yt;mjh2=1F`(SCZc`gj8Dj9oTLJ>%SuQ z-b9iyeM5FLx0Yj>3uc*^u)U)qjiQaK@%Q8oW>F?1{_pgOJ zo@9~KXt+>aElMShL;CqIk#; zU4G-@_=uprpE~%X1f`NcXhS`1aAJg|l-*hf{m8p@-NM*X<4McTkt83OOGCd^*3tXf zX@uq#myUSC9xD(7|BYy{Ft;`_b&DQU6GA#ag(?dnXe7zon-j#7j1#FvZ*nqN!u#52 zN7<$^hEx3HbrTlCYWG5@aX1pSU9lteE+tp)7c@PM8hyobSPVF;VT4wOdctRZX!+G} z#0Cr)hW(U)ar7-HYs~+n5q^OGPQ}BcLN(u_i_Ft&&|DYt0R`^Xb6ZAa!U!*>mDgj zuNPc1yRl|GI~+$^sIlaKY_i{wQbTNTx!5$@bYc|52!V?v*Hny9qll!Wj-yZpY>{PT zYfTX+A&yQ?31y@KxqgQ68`XJn=+6(geXHFk{t82aAyJ2UD5;z_3*y3tF|~JuCs`z<=bDycfVYA9>AFvzZW z%5&6FLzOrHqC~ssy6v-Z1&oYN(6fC<59NJ&tU@<7vJ40qOH$fbbUTm>BtS}HeK>eb z=IF$ZY;*-YOpu6_-#aHN#rO3U{*T($ z8Sb((vDb4!bSo+mr_s2`2|ken`h!0p;1E>E6kV8Nr1Sk;qiHT6zeYNH&2jNI?ymu_ zp~eQJJTQq{yJ?1&PBrH*3ijl&Sy|)RBxKSuaz;mkKbN7`;{NXuiX$ONF{T|}`lP46 z7xagmrv~$Xb4Jo0=@k?d0&#YuTAX&BMqM~n^z?-LCrtSG`NtoaS5hN-uMdNg znwm<@cT+BES-t{FCg2M=QC7aYySGdPJMNL_ayWmlt*tFC8|hhDsS(`;Pe7#jq46&; z24Y^`B%&SY?`dh{`u#1(M}68kWmVPD5!u3xe+KqkA~}1_z$B2eCU9`8Lzv~@D1s*0 zE!InGo3M5I`i{E%+SnSr)z82g(bm*5nlx$bUbxtd61*7WJw5Bg*8g{UYCrFJ@}ChE z3UD)!L0r*5R2Ybnkr9?TyZ&%kh3!4qBG+~OR${dIgS9pTKP+qbrmSID6g3)P*=W$_fEi` z&o{agjbG|jNwFLcy?kYq@zo>v=}(o@`k4}_LV*t$6v}NA(=%?q?xaP$a{4)Ly1G)@_g)A5D~E)$HP)O<_G_i;fG z#28rb^g=jk;_E-d5wsa~oWDBoo!=yqT-RT?>Nq9b5b0%)9(};JGsrs?)nU=SN+7^r z(fl5Lwkn4;z0!}O9@RVS$*#Zh1hjAkhxhc}&f~_G%RWVO>T?M#Ki{$vFF!W3Xu6-m zw&bA+NRs~SV2V+&no94cdKBfR!&hF{BVrz&#p>PV`9@oObv+MqBrLjYuiKNriqOU_ z!1zbW%p60R>Hbb|VokkXz`o-e3s4bb_pHsFfQgnVz?*tZmY8en{;*%f?S7VhGZuTj zI503!y*qMu(sr86YK~gf`9KpB69ar-Ww3H64DZ@8z%NlAGmF#y?W@~d7c~t{zIHUx zOQ)X3@LRFr2Na30axGI+Ob~afw93VocYg0uuGia;@p5h9OjQ}0nMF1rq3Row9q^&HiFL8r;B*tl;+)AKGWT1zW>NEoX3#v}@xpimAomd6b)Wy#>-5&`-lo z%#Na^j_5~wtMfyjDlX^s7SlStMQe-&WwSi|{j;@O?j~dIxHLo9Xb1JKIkLKHJ{F}3 ztk){<5~I#xJ1T8m5Ac(Ocb>R>$Ko36M&{(7bY=Z28P_VrPf}~ASsZd&Vd^a zQC7X}aqlT$%@;M=A8%cImcz>3arS_I`ZdZZpf&h6_m!<)`yLVZOOr8x&89?4Mguq9_yRhZ>Va zay~^3O9atE=05`Z&eG-c?V8i;^Q{x$=~`Y{5zkVo_p3O|QJ&zFj08OTTf{0|_^2a8^7WFQMV1h~4amg2Sjv4~3((3~^HFP8-r{mXY*uIuFApve^L4m>y zrFfaKkx^hl0)1zv-us6~D~3d3{}x7sieb@ZH4e<53sLRS)>UxSJeKct2v*~r&RkFklms~h4FHPyAk}^Xj*Z0p{BJh4w*N=hTSr9|Z}FpI zfYOc9A>ANdDyejLx8%@`g2d3B14yTIcg)b;9Yc3_{f_tE_ug9Xt+y8cGv}Od?7cq~ zpFS?9<2c0i&$G?^kwpbwe)^BRX$nje`R3{y_`n@f19==$p)EP0eFrOT8P!ylDl<-y zH$~%|9M$wbfFI?y?xG-;bXvr!?H2&O$HX3&P0d?W+zE!42ZDDx`v2^(^_o={lq*yd zs3j$*^aoZ=F)hjk=oEZGCfJq4p{JM0E#=2Z3)npkE%+qrF3xv1?_garC5b3LKu5>4 zeVXW#RgmK+P)a7}HA1e~GgMGKVRfl+gKN}My8cOSU3}KgiH%`Sl;Sv{oyvHYVn=(n z=Y7{~Mgq6!r*LpMD=Z=ysmSiN`ux7N)powqnP!Ln+nhwJ_}k_5h@Fih2iZbaYV&8k zeiF)))9Riqkh6d{Lb7LUf%9=hVlgv3e)8uIiYd*}mi?LH_u>sM@P(J?NjW`verGPl z$dUc)a zGkkT`po>&o8+d`T3JRl|`)uEdxk-Tm*6>iDVNdN}DD1LxtZyw0v1zpA2XhVRPl6C4PV~SDk3B?Q@#ox7Zy{^i8lF zd{?C6_h@}`UC}k)53jzg&Z&OwElclGJ3Y@?pa6+DSmfv)en^5eb1Zuwy`+iUjmVYL zbfTL_YH3Yh^!tvUn-HaL9Z@Cu2>`H#tbX0dKn?i>L_ftP4O}%-T_EtktAUf$=%Su% z4yKT{c&u$5`E-*}h078P8(*lZBqSs!UB9n^^|M=eqB!U-FCU?GKfg*NU4j>*#4m-)R*M3=|~Hm^gFKO!Av%0i;)d 
z?4X4pfYn|Ju*(uybid2Ugc%lRQDs97*E{H7hL+TohD-dw=p-`Ks;8<}2nU^nA6hVm zQ{DEg^&|lBUefM7=3@b>(ahwel9tx5zb(c=m6d7_Lowo^)v%Y|-rgW5C+jKBl{~`SLrWk^t#12DEXjn$ zXbE^BS6M}k3GkTjwMYGMi&AvgjPx*4E@C1pE77n7M;ds!%5NDJ0e3Y3`L2{jJ!2O0 z%~d~Mo}Eui7Qm!NXXF?7--7W5sI1!n!MBm+eq)mN%W`IhdnD<9Tg|N z9N&Fi0aryGm7E3+1t*!DT+PmOp@Wq=`Kl8en~hrxVnHcWOXM-bh}TGsaPoX{1_G=Q zZXh#dBZ+a%qveMu?qOu^FtMw;0Nm5V*h*8zMU7BXa}F(`B?KM(@R@MArD@BP_giz- zkD_9<{|;G!E-vj|?k6XsoT-z#6(4y(Swi#C=d5o#C!b6;%ZOHDc>taT=f)xi=$R1y z{f&*BzqP3l$l@Mc+{J}E;C5FdIS~i{ATW2hvs^OMf=K4mb{yJSxn7GX?lu+yEzwKw z1GcH{*cb;5Pu5y$fO`?wJBLpKaPQ2&XuTWIR7d#r3n+sX6g3O){`?mKz)b^QnyV4v z-#{V(xa4>Q!#_nJNz^O&s_oLr$q6|ZbwG7>wHh9NNJz-1jE+*d1P$#0@pe=J+z{@s z=ZcmE=Dq2rdp_g0mbhU$O*q18nIC^>&iN&v-2*LdZ5X>)qqMctG=6+q+5wD4pSzc-{n?3$jQ{v8lx zHSr_2JSskZ>c`6pZla%BV`|A{se5G#)zq>CG2>9)OJqfeBq4rKk(C9?LcIvp65qGF zw*Z;+Ao^Gokbp2Tg&8MEuRBjqs|*a+UDb8+7RGWuijhPek8$>yB(#rDIu1=aJ>UM5&vQFV^{0HvkzBpLYPW{S=giu>sE9P=s zT0=kXQRFPbJ;jK^B;GXUB*O?ZG)2kfeGb+txZ_9*%sXauUuisR0o>*$mTx674)gQ! z`pZ=f!z`TtG-9i(&v0x1(fH5rxM~MBB7NTcY`^M*!11Q*KK@{u>+&$yjE?)D-&&(p z*;Z-O5jlCgKKOeYu4VKde9v!qe7szIY|P#TG$yvzZ9WJ9umu3ZgjC^igqwy}L!S0E z(*i9A;|V}zd!^)$D?{H!5ORIrM><%pk9jC@y6~v6$YQ4y`Dv|F8(bMqr?jk>Zj{OcOwPXN?B(=!NXS^DqNAhlk-A`Sc*l%n zSIFe6I?1Ng^4xiFwdiFkKu9W?qfs76X6queIy+e`zbv--`T~LxdRBUPzWfR$k5IVB zW&*Sz3ecpMiIf`;kxfpkfKFiqRuV7&vqYWL{s9xLPL@NX@%hd4{Iq6UX1u)obeR=6 zR15Kdty@uXF`72Ok&CP)Py5B63!)2qdkY%r7*o7Ov7t8MLwti|^?2Zae+?Ma8_rHR zM>%ilKi?7jLO?jss)Lm2d4|=rvHr;+mk(!dT2Q^q(jeCjDXKi{^CO-=5eQrFG( zW-C{zdin>GjDgv)(%xt9!*DmYt5^*&#>S$7x#dJM+9yCWGBA6pTs?e!WkUoG2dlk; z;B&Yhd6hOCNeoFk{}h8c%Eu@e1&b2{G)I z{C-+$a;*X_vA*`i=g847=aux=KTS{qa)Dd(G*At4?;)H5YKD->$w{oqXKIeh?{4S2 zg^ON*FZ|yrNe0+01!vRm+~P>1-O*6`SXv`vW1HCt4rJtzf|A@zA z$RoGx;v4#5w1)bX=&m4feE|eOKhjA_nc_|Gbh94W3{Zi*fDdtVe@|4KE>O4RLbT#_ zj`j)(eFKnF^$ZLQl;>YYp?vsf$|DH4tL2c$0@i6n=B{=n*49LKh(#>Ud>c`kEMGO$ zRQ`ZFsRM3x951H;F^Z6wghXx8$ePVUEuj8auQcd*+30#~J6mKSo$mYOjxA3145m$Q z9~l`bR~?%NTw3FYj3CucyGlkzMyT3&B+38eoP3U3CxA`C51X{CeWU-kjK_7aqkV76 z>OGHP5e@qBY^~y_N}!Ua|94cO4Lsk77BjA`tqmcUIXFK8<)u^oG{kDr(h!jMq^ z^$k4E&K@uCS_{BkWg>h5+NFCz;edW{8+?TulpAcGHQ%QO?B2i@&npY>quF$!Q_SNb zocsJVP2vQpUv6pd<(c%FlLAl(VwjfiE17KyY=Il;hlO;_-lc}NHmB2$0*!q7U_EI4 zEjqgGC2R*McAtO(=l{8{u!G`xt*f2HK=swtVg5)MW6U3AQn%)l_s!Hz`?AE{+&!(H z5anr_)l^k0fFOmxf8V`Bqo$+V2}_p-F2H%BIlPld&*+O1BRx+qjyk;weQ;OEc_4OB z6QXHuZpxdAj5d+lp^~7nIhY~%fgBPa4^;PQ$?!CNhEh!}IigoU+CwOfQU95Xt1C~4 zaIswegcr)d9U&&>pTFnh`0Fe{S0w_xPBP$!-vWZN(?3u{YIGuI5=G+S5mtjoAo31; z9Gd@r@V%&HQ`HzYK*wed!M-}kEH95rn$-{#6a>5_3{Q{SVxyw|ppnZ@KWuGp$EGFw zwa9WUlU|uW$jnTtWS137m>Wib3yVZU5>BTg7w8zv%nWp1jA5lGr53w_vo?FL;~)Nv za`vZIbamRh#!u{zls*E@Az-UssdNsTUliIwhTALWC@?YSg&SI^s;EE#=cVC)G`GW= znIvwT6f?zzspv=HUCP|QJzBu?_yW%(415RxUoc$%0`3AzN-h2@UPxl%NFx^!cz;G7 zhg`sM>7b}ug*X8)C9&TbrSS6hzWs4!?W5hCv=N7kjEr23u+bNVl0Lb+&&E~QBO^Rw zu7O6Fa_Ip|;#E+k5J#bBE}q_t$Q{yghR?oBvH$+LD9eQfuBS zrR*8fg;x4v5Ae6OoMtV}I^6jIFK4GX2K3S*C7QC4#C$O$!uK-oe*6sv3c7z1Ck$YX zdC`Gp-Ppc^c(t8OYONQgo`0PJ$wAIP5ahrB!x>n?|LHWa>0h^F5>T&j?O73hr3D`1zi(93SvB68SG$^{ z=7K~}2Q}&HXlcNhXRpB&(XEMI5xCKvY{nf|r^;4+&nXzjGj20<0_yVp4Uc#KEmg7M z#f1uIMiyIJv7YWq`?aq?DaHYmhQQ2mFonx1!Z9+pBuJa!|Fhpz!~m}q3U#3)k{Ke} z?cqa|CdTZlYU^k=Ce4fevYfm;Y|=m5DfHjps{#HUzaj+|-N6>;&|_Us&e`(Ax2jWz zw_lcd4@vp?OKojm2L;9?#&%(y+Qy;O)AJLYr4IqNW9K{vdwWGTvBE*eZO=YlUS29I z!0G@n2)U5|Dyiknf7M0SRqE|gmS{GqRh1<3zHNrS{~n$U+>#ZT2Ppm?!6JKBbB*V8 zK+A$AF#n#9qO?>_zQw|RN`|38_9*A{FnwBDT1Z-2By&HbxY2DJLl+O*$>b6W}V@!37t(v17~?^K6bSnA!~h;bSlDf3Dz#Nt>sVjHyk82$=}zts_U zcMo^#;UX)pZH6$MJ_#f)SLuYw-4WXshSL1Q?EutOd;z#I%W<6nDgId9gn@Xb1^txj 
z6aWh;XlfeHX6;z?#!nAGS8rq#8p{D@h{SyHVJz7lBV#cg3rlWMH!)`#GdFiFGj@Zvl-tO;JP1B-D zfX6JZ~FG7<=W#%MQQ8FmQU83Ey+VKbiIorm| zg%b}l>E6+{JvAnESz7kw#ny4Xc@tngSE-8g0}1^gvL!9;Aos&nEi?Ys7PO}a)G^t2 zu{TvUp}7p4LK#-2O^QTbX2i1H6BFmnuJ-3=XPR|pRG`PZwavr7{EH+})S>v6*EIb6 zchwV7`9nz^a9HT=>Mz356x4+8#lY*3fQDvtpk##B+8YteuZW=2_9QUZro?OaSJr$; znuT(>T=16s{fp+AlHa*z?5FwL!@IcMRcsD1au#Ka`#vzKs0-I(;c=l>Oz%{QGWJ7O6eOq4H)v=c+pqBBxNCo7>wDwY6x# zAlPsyg)1jN--@g|49wbk^VM<0bwZZM(%8=UOY4jY5&eQJU*LgE=!oz2k}D8K^-p9U z-`gwIr6#Tp=-2;}qf3)@M+!ZCl1_a3^8GtQF5K7nb3$rUD$%bmzXWeqJR4U1=+pXr zA^$Gce{+@eJ0lJE7nDXFU1!Ak^}#%gzYZ1FFveI6O1B0+$s6;5PZi=AH3}S*s$Yh~ zPr~S5xjn5F@(Rqozk;L;9vD68#0EBa69bW0WB9BBAI^jj}@%hsX#s%zQMj(P9X!FaZ>c)|N$4psU zhdiI&>uK6(BO$-kqsDalQ-r^isM3V6wHbZS@MZ|{qL5(l!SW;I_}>!%S8pBjze)%Y zf|nK+u1&~`Y;I@xZjEM&8lh_0I&p%|&(CWd06c?>zwO*XA~wiv1noWB&!*$n)b{pv zxl={0fAbaqW}&90CjRz~hOQTbXdwYjMGlr7Db!b@Nv5o&2`k@C#F}$`k-%LW*rJQ0 zX@l%oIahx};8LdhMNET^toa#TG!S*~Q)nX{NCU)OqT52qGnN_6xJVSrtE2sPV5WV~ zALuGdMZ=yuVH&36&Y+*%K&r z+YH|Gpn5dbRFpH1KW?}y%B`=J3LC7)9roXyp&-bkX|fX zm-EE8Ok*F)QH(_`dq5Xu@k1#6qdauY4!Vi4^mp5>*fsx%&Z_JDS~|Z~Cf#ez61JAt zHWDYB%B`rYE9>?Al-3`UpG(Y&-ejPe);E7mwh^EUdbr-*->JE6Lw$UD4bo83hmGZq zICSY|RPyyCqv+j+ZJS+nYk$i6t6D<@Tay>Q{aqA-9FdocMTj2=6#Hh|y2jxT zK*Q8Mrud0GL`}T0CY8%7f#w3JaW4PWI1|&rjqnSg=j#gJ?NG(U#JqiXo~0eklaSz$ z7fKP6H9$$=KGe@-Ff(j2IK-69@5})FpN|L#2qZquW&ax=?w)1lHNH%PHL!k-fj~M> zu#9s>OzVONaphN%f zi{s+q8?nrz@$CpMJ?;WEyexk671lvjfN4$Kb8=bfQ0H)x9>bBCRSU ziMH1m&FJfk!-~YE0f|R`*@<0_uR2_F-SGFIj7*71F`b7iRH$r87fxfa+etLU5e)s+@wLv=xiyO$h&_nqPkX zR^l=Kj3>RI&|6~dhEn_zf@|eC*hJ#b{2}`OEG$KubG^VGo{k&ORWQP;{nzxIfZI?z z(c=Y!oO+Rjz*z3Q%SqTO^_~r*+K}<5L#;dA-&Q^n|5BZvAn) z`M5zNc!1vWugqRyzs8U!M(bdD>+>c>K5M;&BH30msF_zM9`Irnxx@|)X~NNM^C!9U zS$ydA)>#}tS=efwguDrdmqW}Bwzj&DNCfCu8L9HjM;sl_$LP=plD*HG(_j{voD>6} z1O&8GvC9B!h<{bNymn|;lglA|E7cCa&DCKnSM)u5Bf-&8M3SZclWI_AF%@-P$Qgsn zvD{i!WP*a0(hy@=b0>A8j$cbl3x!$_aBBW%^AlbgePrt5J6j8SUO)0(kyMajCB50u zCe~dNFC+%DU_GVgl~JN*~Ke|d?TT3~B~83rfcZ#J=Z zXzt8uJj9UXtO;g>o{?smO7Eu_DMqgbnSwb9+DF4rdPFhFN33R_FvL7Ez`P>Y*vE17 zSxjhmDu3y=ABdxaMhv5Ay}X(0YvU!RyW!uW@-bx7p1rx#s+8d2*{zn!4}#LE(F zZN>9Dh!By>K;g9eRdL*|Dr{ggOFuzK(eFLx3wyNwqGc|+HA0C75|W)LDD&&0lpiKV zyDVWIKePs%SRT&hK^oS$@m$82o`M%=q(;UL?jj&!kv7L6c2GTo={w-tBDT$3Y&R~H z8T=9D5NUfv6xr-{XraOMG@3xxf1MiA1;i`<8$&rRa6@!;!C`=9uw(|>Kaq`e0@D~i zH^74)M|J>wqO?A=T~2H3cplaOXS?{naYJhO6Q_=t&OI9WVjt@c$poOeB8q0s1bX%8bt|eha|s4fd1S>z#&JAUgigj z6b)YE{{-bSJNWI}f<`6Aw@5y0x@7T_<}9Xu#2zhc4kPmC`$)tYj7!SH+`s+KGD0KNeKZlt0u&(V!6r&V&fJkc|_;46%GKk)tK_K{k{_#{5ye`*~b& zUv{LeL(}`4ZciZuIGx-$`uoSnTxq1*>AvRj5-XRN~av4s=`{ z`3inly^pmklHue`OTzt_;I!#KXM|@W?4k4gO2x{y62PcwTj2xQjoj zZd!h~gyepd8gFgi7OI-DBomh?#6>UdwqMAjnMVe7uG1ZY@&ci-R(%U?5LSJU@QhFo z#|f%y_U!z7J`^ZzNSBiR;P4>yYkt?<+=q#(>Kj>E*{Ma6>KXWGw-g{yGGk^UNKk^? z?`H!ei2T1WLwAwfvNa+*k`eJlbl7s{;j&do#L)B*lKWi+D?4Y+)ZTBMp1l76V{55}Lb#Xy>|2SGfecdAQ)HVad77s#m5=+CqqVWA8=bg~5B3Unx;nz0IZu%C zt}ebaUf+?rI} zq299RHA7KlGibN{R!X&%5IrwtsP4OWl*E1_*q)nFXSs zGAIw*ppQ08*RS9=o-6(tVjlFn<#2+V>mc~74yYuUY9LE2kF`H5r-Dh&SwyTER!mXJ z`Z}PCS6zdlv`PmeDoU(PJ*=p-998#SksAhYo}#of%|)~PrVq_Mv%@Mo#i=h5J#IrO z3vR5{j_bH`J}IXQ%7}&W5tf;T@4OB@JRbIwhpkk4O^4iEgV(Do*>vPBr$iP1&U+?Jwhwphzcb1E>oJ};j+QHY? 
zRV}i~2kIDoWB`x65Q)Q%IhbD9dDy;pHZQ4pVqit9llLT{N8HyjEdZX;0Y;m(%nNjQ z*^`cSsoao1+49e2(#FS~n>q0_HACbR_P5)5>A*n5O#kc4-QC^pQx1ZW%W`^5`xm75 zxIXh#^CKy30Suh#6(WrtZ+?B_@;awJ0QpIGcbOWmh4jIXUnHHU&FaylP$-&6`~KX7 z&wem^HZ@*V!n(fuQtnP#BB&T_EtbP~&8T6_5UXHLRqg9jJQPs-pdp!#tdwoE37`CmTdfnTADqxSt1hlnizZ zJ><|IK)X$u`>5Z9AMAC=$>c)Y$-2^s7%izOtWMZ(38Bk>B|Gp9!YWX_=98h}PZ(GNl-HF0AL72^JI&<|SkDH;Uf=QpqGnY9l1ZFDm9^+`flvs_2f1 zpV*#s1b>$uoy>q-+>D0i5pe3> zb3a>YFh{f~ObcO|kfa!3WCY*4NFg^;YPn|?1o;NgEP>AOhJtB13qfK&E%J~NQViD-ZUP$ssektt{iu@6bjXhtUPw@y z-L z(spEp*+E#y|6(PtGp>T|BX;Klxgai$?5|OsS>;i3LH^h6c(Ddzalz{-OD_#a+1}Cz zEb$G6*2lva^@G_AA-pVF5Os(E2QfzuM_8OO5N8uMe50rPrkUDpPan>%U7x)@Wo-Tu@(||GZGY( zh>G14dhr$70tWxG3JkT9#{Ci(-~=m+51@AANr2HG^{9bb;;}{ta@ShlTo5=*ZFuI= z5e&W5-5Ar;sVb9FZ@jg~Z+nEfA3|@12q&;jr#>-j2DM@vRu^Pwkzmg(X#`ojcJYJs zi%^|Mz8^Fx=*at4h{!wu)qrCOT~`Y|d^yB(lV!sG#+;B=iGHt7oiYs1R47j}){{eN zF_v6|ytGPXF;?QM2bh0ZQqZb{Qa5_}#i@>1@Lh$xph&pde0Td=_N~cAFw*c~{GU}@ z8HY%#>OIY%e~R?)>n<|md2+ea&2I0J$BLecw}KMxea;hN%L;l8^?Y%?l^|^ zSoD}kZ`LgH1(sD6h!j;dzx8K|_PYXeO$A8EorBA;c5eyUb*!Swgb~^o{iwlX6cfWyXb}kzy zsWr0U@bl`CyrHC`zeS4s=hqlJ`|lcQ?C=>Y4$}n;>MuyF5ru*}ek&+Oz=@l142HOY zQ-`@2>$2^u4>>#W>~AVhav1TTgMoi=n~}8WYy!%(YB{YmvG3cf`Hs)}{>Qqn)L2%{ zS@m4D$xWoFP9>Ih%6lCH2~f|iFlmU`tA1g<5)+2j>}iwahebr4Cy=yMOSPq1&VaCS5L==Y@! z*UW@b%Sa{o=r(&Smb`7M%GAv_9ufI ze{Jaf-DKQ_G67j)Id|TyEW0kk8mQ zZH3XCZNg)HeA*nIN^SLsg0h611bgn(opO_KG5riYUUeo-P|um0(Tvr&VJGswI}mGK zyDNh1*=TP0>v#D%uNmdE=+5~S^|Q^zN8-M+Cv!q)@dNz~F(unvMQ7_2BN`12QH{pvA;;|)W< zjv=tUlGopvLCOoau_vfFO?@H;$k>w{qTLJ zf7qtF@t|f(grA`E{wN=`iD>q!kk6^G#9`7yY2yk+Ga!8}%zt`-D;glx)%B3UTI=XN zwu$TK#K=mJ%1CE{sdOmcEceSlwf;$|;?ZQNmRH!8PDdE4G<^~?r+3j}>Hu%9{CJ+; zKJD#{TmX?6pT5w`uksp`@|=0rXU~OarpHgNtqf?J8@8t-4$(bnWAF7LWq)2nP}Rm{ z+3{`Px9*%`f-!mR3@WW#BNA9VBfAvel5ZIe&_@*@#BT_C~$ruVMredlErUb@KfvV>CJW5TuG z=L}V3&pc#84kG%#Y(c-PE0wi~c0;eK9v9Sa_rRVvJ7C(|&3xGn^)%bPAOHcvzi41P zmgi++CO}(o;$9_t@d~NaMwG%3jhK#LfmvKyl^LE^gb@AY(e@J$Zlc`XPMvMoErk^* zI{OZ8u3X||u={QT%AP3ylaa7X?B-h59`rs~nA!~fz+9VPK@XqjA^v%S8I9psUK2wG zdSZ#p+Yyh|HFQBHk=M~T;JrAe=$8-F2)2J(_&PO~Z@3br$ENZ77yBq@ufZ43m`Y1% zcU0NdUCbq={w5+wnYMpiB_X!{r!sy#Mau+Uelw3&W8m}1yXbs8Mwp#1Xjw^W;IsCs zTp+fh+!yharg{Rrarxs9M)YTv)O(|EvWC+%f{w79cd$YFDOyT(h~d{c4dz*Tlf}x@ z@hlQPAR*6~ivz)vk@EdN>Ay{sl^k^ntKQ~&9Y`(@nu#x zoiR+mia1`e%yrjxDDIxrpi^}N~8YE`qA(b0lP}NPfpJ4 zF+*FkOy_FHTXS^rcn9x1kD@b<*#@(wTJOtRCS>SU zg2}E1^b zheh}c8)D)uu7*xYToB1mJEOkE+h%#;s2_~)dyUz{g@$ltK=h)uA!;0T9L$soRiG(;DH!^pu*@qFwd5AbGYBu_K4%SyMLQZ!&Q`H_RgL( z?Hai^$f{0ApDq8Hs^g^)m`u>F`)F|J(Tn3OBR!yCC?~Su3ssKEN>jb41xB`=Sqj0hK5RL8NBC2}j$$ZqHF)iUgcoc`MLSky zPNt))yD*d{W@t!pdwV-sh}R|XaomIV6XjP&#%QP2pDCLEY}aH9gqkzZ_*ldw8^^p= zUfTx6&ZTNjm%lw_|Iw?_1i@I=9wtkjkoxxBa0nwbx#fawE?0I*Bb{>Da#1(M{T8ca z!0@?)^80z!+{L=SH3vqvovS1uW?Hd|L3CJKN8}v6V)JE@-LP;xqE1nWO@oe*F|AHC zG|k4x$({}CKAarA6nM3pw_kED6F`_hbgcBFPC5eVSg*K83=qw4D`ft+(5`^L7fO>FF1TQVR&->*Dx z0fH|YR(YpsybzaefPpC~Dd};&%Z$iayFDhKK#%@tYuqiSjqY_uP9}7Y45ZvhoQ)aT z{HTeSe5+?Woiur5e3&oIzU#~WswX0|XBF#$(DfY^l6Luek4SG}Y4%kw{#BynsNLB| z4~!3TgR5f~O(!oGCwz<+Izr5#Yg*$3Ej^9n%cziXdzr{+n?CkS+gY_)@A-xZa!$Wf-lJA}j&KQ^GgvOsmCsQQEh4 z3Q3LV!b>zyJUH7EzZ!N*%m~2GM4swKkhVPn4XV4(xwI9Zo%axwOIE3{SR25z8p+hd zLKx@?p>n1#WUlhv36E8Eav6J?WDnuU-v%-bIfz5QM-jAvXXQlhPc zkA_+G2@SrV1L^9FH>+ziA%u@)(qkO`l8e`e3X5*~&{m-VL*vXZ2e@S;^=oi`aYzP^ zcrq8dnAig~3e0;5|5^tcYp2ZFbbs}+KJ)=>wM+Uold?>qI zv{8wAgmEwSp&*L&OnGJ_M@uVf@!aZJZh&4bF0!61g9&>^>+Bi{RWnF91LI!&q1%SJ zqcZuEoY->gHy8LrD6EEy&-cm4O@1-K;ix<9P;4z_OiTVnbptj*vsXxFT0?x z-{x+xg(=B1xN-D-x6EG84cI%GjUG8N<|}gXkBH0T)Nr0T%rYgYRq!+uO~jWlvg7VC z5%tqPD-=!0R_`hbe22usby15v|LZnkd(y7r>__X(k-vOH^@(f>sXg<@>Xn(Z@%4(J 
zuvE;Eo5EMg9E0GC++xkf?4L$+iX~-vbX3W*0ELVUyC-h<)d&3bu)pGH0g^bHRoUt7 z!t7z=5oCkahUhuWkN&LBbe%2c)w3+aLY6Nkx;>SeU1+tHr0E1yDIgT%9B%hlhdtm< ztC>=29-c&i?sW>#r!rJEsx6{xBY=3Wy!WOU1HfPwHuWZyTMyyhDT(*@#5r6$$!ST( z$ln?F=q4awWEobHC~UTnNqLdgF`8dysIB!vNb(B0G)Jh6qda)%{g%)bjS30T;)qVB zC6N7Tx)_Xp+Wvi5p1TG*-G=cLF}JZfO=@>ZJw|3YGVWg=ks{-!(8ZpYFzg1^mCBit zxFRbV$2U-_7NLPlqI*rY*RR9Z_d@|YW5q=(9L6LNa~MHdBgS%|=W@Wdu)q0okhWf6 zuF<`3UML6F=^*i19%H))=i)QIAR}Dpl18xcHKJ_5vRu|-O;9mS$FSXM8=P3)`>kus z%eJ^L)r=b(;H|>s+8!0i{arEnDx2Ubzu;|QZ;;@X&@AITkH9GnXH{S_rT2)?$xo^4 zv&-6O6{g;!*oG@YTuo+2>fnqsrT&+~lcdZr!gF!o z`Y#*OEEeWQbhO7UCsFnCz?3i(vOLdRI<~5=Df*BuD7#pNMwwo(J_0@$`UoHTj=xE- z>-N5Xf?6ww9aXK*^A4eze7s41GMooks+9935nm8@qCkRvV%AJ zk3+da?@n`?X|vEDrF9l;*?lVUW;|)+og6J8!52+l;ZmG8!BWR)it1u1BO&~rk-QQs z@{^Vz`2(>Q4#XVK7oMJ;#KgoYXiAs~PXSX+@?*r6`WyS(k_HMLVr~p{aq-hz)hNsGmssu3e+$R@{hg#?U=qEk;J{TqYm7E<#QFZxTT;5G~{~F zeS+^QIT${+3G*-K9GZMHj-c1?CwP(;9*Gl>i&tT?-0bdkvle8Ir&g--O*8pD;IwBRJQw@2Yegc^ z>tt9L;nC(4o7-;ryUEHIF^u*J9CHV#fQhnRe%J;-+?$9?B*a8Ji{L&uM*)VGQhzS>R_ft@D^ zY;^S<>cw785J7UN`T+(uNc=6@&smN46F7oQrgOjQc)4|~6OSmepbed{)6*6MF|uHs zg@?@xA0p6`6sUx3_QUYuD$6kK69!g~n6p2dC~07bcbJU7C87ar=WQ zyL(@RMW4e4OP(cgT0@TG`OF8}@36<%{|%zuq=z-5xH$3_#Nv*2rWXJnm0ldF$P!!^ z7sQ~)4mImn&zX~5yp{r$0)K3v|nVE&Iw-+S-3HkkMP>M?%F1)gyj(DrBL^Xl`=1RmyPv7{vhHq7GF@5zPTtmx?h z^O;Z6c*%K8`*ezRCLbh%IY#lRaMxyqfNo&DCUiv@o|F#16HJ}%kb~|WqEYbY@fi1n zjd}IBd>Wsas0kSMdU&XIEzf!cyi5(&yTfHNgmqJ=Tp4m0c>!{#+*sQu#GC?(H{rne zm_`mFHIgng_AUP^M2&)zQyC}dUtVxilUp?1JVdov&5|WhBJ6#pceS-m>*JL+Iv8{n zWyt>fw}&y{nsS`#(eLtiJC!*BH3d^VDW3~J%e&g8#B>ENgnREwHQ9sDSfz!H&um`i z$U1g?(mgf(hc+oGvv|m4s1ex@DF}1pM|Vdow5p`fZvMa$h)=YB7U`DZHg?&xeaR0;W=!W!u=n~VnT2ljGUqRgi&}Buo$-ddN z=^QrUUP!DsDUlDKAB%7dod{9L^j{-TD;ZADZ}@SEfQv8=iWTHi^48HNU?=Eq&03u) zVxZmKiPhr`pTPt#I?ha$iy_N0jt4d)f7)~)PErzn)&4crEb6YTPO$$@7q?CU*b?m< zqf6BKQW|MT#O&_yp^%D-M)z@>O+h~G!%ZD~re`vFPAN?moe=fx+5IB}29mF4wPp6q%g}Q04kH7GUt-8(Fl|Ci1G1>d zm$6Bo*my|;FD)>h$xPXn}c~29+;2aSwT9#*H>nKY%*PidW(vCfZ+Uy zp`VZffBN^z8{?a%u;(+NM4SDV$`5+~Dx(_JgikzV{qZwxB*C+1h!bI1m->c!LLb<2 zS4_CN3wHcjF(@$f;WBa4xIk1hW8n)OrG`%#`UdjKJaiTm*UL{0-R7Vw|xQg zP$NaF#ip5-vMoAmnY2mnR6xxd1?%#uo}ZysGWc9lDzkCdMZDOE;Z9M>UPT;8mtv;p zvnBsmQA>z>^Lb7af2^yG>C$$odYB1+sI{k(p59bePV4KI-NlXT{Q%|4YANZM?@+r? 
zed7KUn-uKRUGXfBCGP^4zl@mNrmeKFrL4)~6!~GT&(hc^vUKKBX|RTXPN1gjy+J;} zofGRz*ai#VS$z3HfC_nE2=N*_UY20EIKR4_*illzTHL&{f^q66_`Nx=fv)1O-HK~} zdb2FYn~~#?yrgI2rU!lN#I#Zz^r+|BQSJg*FUEMHo(H{M+ave5UX^@?@H`VkpYPK- z*)rfK!ilV?s5l>_5&*a6dvfQ%HVJaD>(uKX=tt8Ye&xJt{e0m(O-3g&te&N!Ztd-f ziT#34(t-UFxjmOH{=i|3j@vS7Z@{H;EvT)xw1~!h3gdulUlIsH4?Yx0I`T%srkB_w zMW_9A5T1kt-_({(^G1z+6^PAgF}q@TV(-_&#Ge}w^K}de7TL7)flMM-{!_W#zC%OF z_H?S=a*!G0E5LpA3}~IAQewW^*f8?{RQV?Z@DmnEc5eCsa;*DPR1h4N-H;Dm{x(?lr)XKGc}1qMxKMm$k>Rq(!m`;{$=yICbzpI4fhP)x{aozKN^($iwZ5`8 zcq02|t>YTZVWn-&G$_u%+(A6E!<&az`O&bC+lAO>Pp|W}G^67WQ{hyiP*pW$(;tCP z89(fUM17JHgfH)}k4MmW7TX1mo3p`CKotN`s-^F}X*&A)zNM}eDL|MdG(B^6eSUl| zs)e7r?~3&89A!bTsy>xI9%S zRpa|iDbs~TKfZfsVq!vk{_a1X^~0gr?`0Z-JCX?%oJn&$-_dX7)6mAwKqc32KSxk) zsGMHz&sa^2hf|km*U33DQ+)gWef+_co1ly_aH?3n%5(tVlsids0Z%ctZSYYv7J$zFoWrf*lcIEwC6}+a+{E9R3 zS_{QRg=|&O>Hd-q)1*Rby$bIn_Nk0!W<*-ks!#8u=$Z zWb`KZszhfde%j?o3A=N|^rhqYbWf>T5l16A{Znt4m{Aw_k!VAp9UUDhZvQfDY2dkL zPIfi_`0>GVLjXS>_H%U+KjCf$NG$Nj5HXu4xpwFi!uUZlouDW$zQ>_9sQK9_;@wp% z0`J0p0rNLnPCkGBmFTc05RKuHS)6rh7i*Qe(!u+{j50JXO)&9L4aEjr;5}zMM(oCM z^5a7tuE#aasN})Oz@J6_x923s(Sjxj7|ZW}nJ!@q6F>7UwcbL;mZufU{cH5S25VI~ zbwou$3cpKn0NJf_X9Dawd-#dkTnf_lg9zAOr`ML#1 zdV2bH3l?BqOUcNHwGAynp+R_OGcZcBa?wa5Px8UwhHw1TWT$11xJOp=lcM0z9 z?#`TibLY-~pn0mQ&)Iv`UhlHs8e%Lr>kNeXcDA|#7%3Bl-kz@(1<3zp;JG^LrXjJm z#O2E+U3TOqvAc?qmXs1M!=>Z6G&1>G@GyG!fuHg>?gdjwKRJ|_o84I&UcOG^pSCpe z21pWm+}s4+>j?!!LLS#msQV5QJWAedSR9$_HY4^}jlMNt(ZFe(PtkAczdoz?1q+%w zi>c{)xkIyxG!sLLwvzb;-nkJLQBJERHiX}0zD4g8@yNU1%a##rMpy5IpVVXquC;Bf z4GZ1hwZmc@IWF~hRwgc&GJ5#Udh(odSw0WXfo9&ZrVssi*v&UwIGW31WC^EU9>wIu z67Geozg$=%Ec(bFf7A!FZd{pF?`RNxV*fXJat>>+53IG|YS3towEEWQPv5!>wy|sa zgLXOj#+-k8d7#HJpsNcfmdg^^h&H;h?jPCX@p4fmSnsA_Ba3_YdL%9zVN)Y8nJbOf zo#`z8$ohIz-rfLsUp$`P zGy>;IDvECJN;gm!KV9a%oP+t$%wGg>eb00cR6hwG$LS*giouHt%4Iwi?&I7{O~e0q z1{oW{)<2h^S$pwA1|S}}#b~}Sw47E$-}*jrMGp5p7?EeN9d0JycljNhOszxaN2o*Z zQ|j4$`|tz3^($OXq~?=TqSB}Fk~p=$s!#q=*U*S_XZunurTmTfr+)Z9YmfR;p%vq{ zn4mER#-d$^I2@X)zP(mTTSi=Bm(%&nAZ=EK#feQ-HJX~<_b*`UM3vynA)#mJ6?l}p z-YFd=ZApjfizX8~%)5t<&JO@)rzeuNZ2(w@VrH`ffTU6jL92KT6IKyk0oMqN;wL6+ zVQ}yv7fy>Gk1`dC{G#RTYl^!)GFkQWA}94Z=X7j&rPUo`uJ~R?f=pY1D&ispU~bKlooC3D0xY=6Nb^}Q#|YI%Wne08MFSGkbKUk|yeoY@2XP;x4X-Fu@> z4G1^2>PZR8+Fv1h=6@H*ZX0@oKK?E&6XW`gQ=8LT-#;tm))o0HCUwe$L1JybYxeyS zmgZl6f=31hS=pjVT_bvd#YD@-!{{m^2ja{B_3v2E77aNyY=LPKyOZ>N`H_4d!;{Yb zY}7?|$~M*jZ&Rrr%CkBiTxJ zj|6~|-=A#e1ze6NQ;y$@JLQH;*8JWw22OqT*i6Y@-;)6D*hf>Qp9xGSc^_0GgVE~Dh zmi-<57$x`&<9q&_0TwZ;153_IRd!RyhjEab-i}9)F}GwRPzTa%E^zk<#xH*NR>k7{ zBCI0;mL#STU`!WEzd*X}`&*Lyv&JUf%z(({ZGzvXDz=JNQ@Y*=@3{1h->Xr8HK;Wl zhcEbHbI)v{;+La6j5a+1k?a_CeM71zr`)lG0koJ8Jg8ob7D>`j>@voCM51Z+vY`)a z4(CTrF8VM!s}S)wKgYSG_$EP!Cn>A9UJw7rD{Vu`zP8+-{2p6p4$(V<>X*1VOb{Gm z9Kc!m0o;HDI5}qlYcFvYCkBMnd!KyO_fEe6wQTzbZO3X~g*Y&^(ZuJJQSrsC+faAC zanE40PS;_w;MI-G^>i7y>Wbb?{RK_{Nj`zfHY+3^TN7A_w`+X$1(_N699M--n~qo} z;ePJ~NLx-7I^t_+B<74;g#FsT9N2Jn)gaIEzGsZ|Tj>c#_~CI>TCbxJ^`GyeXjwL$ z`sVTDkX-Ta*)(H&+wgqJtLl)nzG}>i5=G22(&?ijmh!xoKd2+j0QuBoczVK)q_H@Yl znzQ|FnP9}$#{=>ck9GH#&I60oDywSD7A)p}P?e?wRF8j#S|HvhSUhLke6Ob>TU>WE zr>r(!L@mF=D!2|)ABvHY5r`CCRKQOhfEgSb8p7vm5S*+OO&!P$Hr^7ueJ|Xap2C(- zdW*`_e;6zV=6Ib&KJ0mtda7PwXRqy;?L@VtJyJt#Q`pk3R|g&l$vM@vorAq3wAS%U zeO(njBU6ZQmV)t_`4-H}1gV>gAGP5QYX!2AJF$roa@8-l-%+!%DL)PQmAN-~S)sYu zbg>*pTaiZ;m=zG^RJ9;;PIfdlq^D8WiA_!dAdjA<{zA!{7Pqt2f6%^74rhDM8y0$W zLQ76yr=!A9nl~1d$eQ2TcNREwb`F^b<&0WAiX=jx*5N(^_B*a5+}8jSt^F~uZ)J|! 
z7U_D7;&j0BvYvgsNXVv{yL;GeS65(O&$mAeOLNjqIi_Gb_MoANLsJ)E5{?c(Ul$?T z^8zYqgD9R62aS*&b!4RR>PEXKHYKbG1I7Oj1H=900_5Iw3=f%r%RkF6H8Lou6_~Rx zplnK@V`Qe}e_(qrx>DgtyXFzWS=bf={f=Kbx!ve}dFutNpqpL3k_0mrZ0EVi`Yu%| zJTeAgq|Et2*&e%x-4Vfj{scS!^PSHV>SjWcAx)W57OLR8MZgDLm#hsqj8bv_P=ID2va<6H~BrxfuDW!g$5Np>cpR0PY8Jf!d_Qvdwubdd#@kk&^};gE#|JKeTm1vx)S!>@z1Z%=WNEKTTF zJcm*BJOGAZcJ83)Pno2pd2vaJ02?y~B%iItZ%qWyh3Lymwrl4MEnh-W8U$uH_fVZS z1E*dAG$@deN5lAsjG6E%RjqPG5Nrj4syQ7et#VEWU=6(5{j=YAfAW#) zexv1=>GOICf`VOJKp(%Py0E#5so0HOIp$NBs7>I+5PxLLfiT`C7BY+jic-l|@l5S? zCayHA$17IuTtVgbUkSVZUaUPwhzsr6Az-yXngA?Y>0nSlvD-Od)7UsV%W2F zeZd^Q5@U+PJmd(};e(zHDPuln=4gNach8dZ^?vO%VoU~A!L25SzyLl^+=<|iz zLMFU36+q!XSxwj4RXEEa;!8iw{1)s*`xkhn)J=S&>KfDJx^C-gdUTQrOdo^3croZ{lW!h#Re=a8+DwZ}#%*CKW8) ze1>|5y{!He$LrHKRIJR@o{7(1C5lc_QFk4G8>9T3D8z#lG0O$g*4FfUQa?I+N;DgP z{n^*YJagS>EM!VaT*+^Z_f@IW(YfI&gDSmGVXJ*k5^LQI+=Amg?V+42MqmscI@K$wfqBfG}%zlluzAJx4774N@(NDv;3=xvcq(C=F)|NJ-6KCzDjy zuHTM0EP|lf2F=+thMB_I`Q{H?PYts=_n*|;7Pn`~EXGqFFEncoKmCrURHA>Dd9SCl zJ;KZlf!y9-!>;TH^3n|q3>&sKGAibn*Inc5D4Lt!sj=b=hjGC%kTTXS9~O*xqvA(aihdMxPcm zhdSEf*v#h;Loo1ZT3?qqw%=Gbr$jUPxWCpjx?K*{+JYMIFj)?^fJEUR|L7L~tR>;c zre|2D!9dhvmCX#pzeHAW5c?pzwzjsqy88Oo*55;WXKAThRv?R@25k=-7MV;$#Ner& z`-%7;poS3$?6Wkt`2QfbBZO2{_%C0COSHjX?%tjB`jNWhXXMB9lj5{5RmOAIv-J=E z#WFO^Qaq4v6>myF&j?NZD2yv$n|rChl5+o2zs6-43P?<&O|JvE8JYj0S2w{RU@*42 zLiL-5W5WSWrq83bg?!d)ERSSFw5{Y5y&&Fp)no+3)9vv>W%Nxv}Aw)kw-p- zdY}M{1QJrb{%4P?8lFaLemUD4eUlCbW~>6h&N}pz@Z(3W&-~BClF}99uSDZ}hTfe% zZ!fICSjl2*3a1y3sZ^}UsmD@_N44wO_Diym^>>=bOyoxV@6YIzXK7L8rz-?xLqig+ zj^jx7-&1UBhK(j znBjYlX1KfMsYrC*VJvU>BYNRfV1AD%!iE`)hCo(hL8&B^sTjoI2iN z(&9h>pIB_wC-&H91~no{QU;D`0z%81+EY`(}I@sQ+4$mHMlh#*2F z@bO8k+*gwDr>X&Z&Q62r!hJT&<8L(TdlpqncH=BCx{jndl=|QbE?bhs6$BK6!SMmp!>0)W-o#RmPu@RL z5di7XRUr}~wOgtS8dNJ()6^V3i^RhuBsArUK5qOMk-fQz2_SiBg{qo&8I@4Z1xe&^ znY-j#>~eE+(|jt2%QsZboZZ~czI9d92(=uh+SW*3Ki-@S3$cTeH2f9fanX)u%QZJa zaO*{dS-UfHhI6oZsx9~Ht#=LHyCYkt;Q-);BqSt6K21d5D`xxi7Y_SHE|J%LNq%Hn?<8UOSIgewPE|aoWz^JZL-WV5UYKX z{Z;5f^&|9IN{{lee#@gb1Nbeas{vlxF6Qgx$EmpzquZR^>1Mty6I(HJV;F&c#Mzhp zt)ZkseBgW8ugeN(5mP^5H|kSNipH0E3E%W%7V`0&T|PNu`st4a`k>0UIN-ko+Eof2 zY?+`#k{&K5ZbvfbPw#n~lB^Vo%IM9W#u@MJHDT%;Q0G%M)Zz?2Cpa>3 zvcyo2jACA;LVG{1DNIY~es`{c%}Ahr4GQI;rioloqA~i^llXkC1CC{4D`uuXQN{o) z?(BW^(Jq1!MfG(OnucFAWJzd*VV!SajPLm}*Dp7j0D9qEws5bpsU%{O+o}gG1eJIb zXven`frcblRq_acE&;`odXM559O=ju?DfI||3Ajz;!H+J&Q1IsD=)34?!NLQe>_x7 zdHI<~+M*B3f20b@ zIo=Ah=6}S20W;~7A5lwJh4La1vAFoDh4#E4tDW&Yt%^7EsQ9 zGC3j?iIYDbIUK}V;4%32_EeO=%9$2BUAjMPYtEeC5xj+mJM+@SxRF29?|45iQY4!3 zaoTwb3BQz1)}9MSj0uNb;7MmM)y3y8UD6i`EL?rGz_o`DV=5>mFBIl%v33q$)7q&Q}Ea=$*obt@5=%oegj~!tf7}rtP zG;H%}VFoHu_?Z5X=tg5Bw^aAK!pdasnT>rLg|COc8mE6~e)*w2N;kirY-Up@CvV{r zj)w$|EVKdnKpaEAZ5-THGsgZHqM6PV<8{MAri4e-wr3!xFP@zCIkXS#tQR=g-wt5Z@NM=5fS4sxVxnCr!5)nzLzw*z24tQsTyJ zM%d_fW?+xEEis6xDHq*U>S)QVW$>@_w zgF^7>^TSU?ss1tfZAKhCUVJ76EKzAmZe^Yzv?1f!?( zk)NK{J*{vKioqj%2(zoYo3kXDT3E;sbbe{w%cPveShKwCP}IK2q%!oG{1ZS?ya!+kAb_fY;Nu(H6#ac}^SaIK z=Kx{6%QdpAy+{NzVXVnhywgD1C1@YS!eE=GZMYKrXGqG5a9>RP#fi~CcD(nkeb+_5 zP+#MFl;7KzfMwcuru`u%x5CC1R3#jTkkYS zbagsVj^!J7Q(IX|*^xxpSLHc>flA!#yhA&tj#onk#NrP0kxY~5sT19cqf86;5Unm{ zX>XjTtzq&zOy3_63iE9H^ZmN0h^`Q&W*@6$0;IfVCz zN7-=IP|It?2W2ph{p$M*y}GLtcS*nq`M0mF5wfne2{<@>9?w)Ze{2`WKzu}scLU?Y zL3J@PY{uJNY`~8p+X3;L8@n5w%t`9+bl^c?7aTot6}CKw8)Cn|rJp>wni&$%#d9(bh~tkU2;EV1oK|u(tgT+$_U`gAeOgPZ$MpQ82Ubh_|Dk~yr$$e(`d~_FjQssggUErp;W&ScZCoDdvU1KnZ5g#hj zB8Ap%Umrp_w1h3yw8&QH)>ZK(KAA@te`_zQSHJpY-IyidK4OkG&_M<{Gwss{p3qcH z_?1Af@S-0VLr4 z1uuC&2O+6uhPJuTX@Y7)qDYL}rU@HA@k$gLVQz%2*6ZxQudROwjOU*Tl5uR1eZJ7L 
z`u{s9L^zVv(p_8|HrDYM;ds&jfUs0tR+R&QDtkPrW2UC2f=)*Wws&?AD8MMgX`!K^ zbxQpItu!+BfDb&(DD)dxUs?kFD6KO%X zW_OXscfl{0Jg{fwHk}7{)U958eo_wv5k6fYEk#Uef;W ztnmfC*9mj9X{pQzc3S*8t%USI;exV{zu!5ajHFum8;iUyL2l04@TQ3oy+|~t7=;o` z82`{#Z0j){c?})!uT36`J!Xj4TOx0CI!|IHMTbwKGanJE<*)AE4bcyz$;jRV90|)A zHG0%Ch=q>71}PA_8~OPIm1MEh)Y?vxb8Ga9U-n*tF1xM@{04_dB{eJx)`JprJpm}t z*73pLuCiaJh&|ZPDSgl|59TGydQO79D#D1Dav^o%($bJvjDP#c`D>$f{vD*5WF6$0 z8Pin}luMZ>bWbFW(5OM$@dQs@kmk|dyS1AC>(@JvlG44%sz2eO1vIj9a`B0&5mK{v zZji7rcygna03R+zI$XUOuzGAk0j}MeC-T<>7qJwwfaHINEIlA1Oo8aD!>*R5ZscYc zRB{@jI7LED25~Zxz&HHdenl~Ted5h6)Xp_Oc*Dk1nqrO8PT{+i*u_Nw9(H+1NQh*+ zQkR^pEO`GjQ($+Ok+pfRWPoqrV%U5)Bt&SR4mnEz3NnJ+Agl zABGa}dA`vlvg*>@Ti7X?m{6dS@>6)f-8MSIv`6c!XlT%Uc$aUBjDqr%FZ5R!&QrKa z$G10%BwD?SOdnDcimobMjUNI!X5uBFeh#tJwn-Zc?hrn+kc6vsaW#_W4ZG0Z* z^c<%`3=M`D*7^cVpHNc%a5>Cc2~e)*7Zz$WP6!Z@kT7s?LXhxS)*+<69eT5d*0#3w zKAs4Fc=!HJ#U;eQx75^}Usw`nI_2V6mZXF2Oy_q7J-Z8ubuF`7+u*mfoM=Bx<{?hH z;#gLQ*?WOiU~W%DqN9&uD=HOH+MYM%mOYP3g=x@9Klu{GsSGR$H z9cddIhJP2w#!+je6BOnS4rLP#0F@J^Nm9;6aLwyXczb($9Z)1B%*?ib9lBXFQN_y` znVLew!NFyZ9}HhiO7Lhbw|epJj;29ikZ?O9Moql}{azp7{9bPGa9ItIm6XPT&*}`5 zjp**mQ<(jLp#ysz9J;=F9^Kt*tf*O2F=%3~okBLGOuLqun);F4DypDh*MErA=1%6G zvNDmTTc7(S_S40O*%DpIKp6b<^Is*+uxNUEdOpu%{1-Bd6{lgGvqIsWot;2Zd-~>3 ziQQLs!*5CgbQ#}%7o$bNbCt4Kd<+hHJ!NPw7)=hy1%n&5ZkH}_(u3*d%eEmtr>V~6NTcQPu((7?J}@K!Lq6)}7t1cjn0mP+#D(?rkf?uK~=BQPNlo;?Pky_H;3BHuMI4v{hb{bCS z=W{*A-(7Vj1aN2i)G5PtmgAh{Fx%lojrCC5qfuAqe?+u>JD(DkW| zR*q2a^AW1^?idbJv!l6(jK>_9e79%thuG?*Ail88&ncuN_5R;wYTAT!k!&CEAWvxK z7!U-V5h0gnKIDbq)xS%+bitd_l6u3kUoS6A>LIV7$%vW^CVE!nn-!nNA4>c_<(h2w= zEn$UgjRjTZp^RP0Id8a*CYGAjl+0aT)pC}JidUp{>iOSWA9I}YeV_PV`23_PnFp8J zfbDq%EYS7G#Qn^S2&4p4hv(dI-Xic)lG`c7d3S zO~u5(W5}OH9Wz{9QMO~Ty=e#qKVb+tRiv&o#M-v!hq+OR+|Y-C>miXjvd=X%DC|*SWa(vucGQRmMdo1myX;!(X;G!=9K0@ z32#h--`?>ZQWRS=Lps;LRZ2*K5g4X5T%Y?5oY_LN6j@(edeZV8XD#htRkE9Gt#Ct> zrlzL(b+e`?rN*6VCF#rqnQ8|X z`xj?5c9_VX2+c7e2n3A%6tkYTbdjS6g_*8YPKPOvxML&M682jniUU~(!}y%*F* zK*2^L$6SzUAdiplp~n2AZoetF$*@yuvMFod&aq2+BV98gZJk@An)(%7sbtKqV4k=N zlVEIqT8!f%)zxfrvEzx4d=sCkeewKL6WZF_+`;%gqwUer81LA+cyz*h7G$^Bo#7-T zv5(AFufTgX5Py76ooK9wrggY2+6UX_%50_k_`>l^4yHA{? 
zt}YpqB!>5_3G44dyY|6>*y`CzvnIZv6W>s%Y@LW1zlOGUNaAiM&ikL7=NLm4k|9Pr z0|)xpT_9fvmUMkRqqBW@g~8hTWjq{gc--I6=9qbOa&wGasC@_=2%6WZGEACM;N&R@ zdw;PJ4fFWG)%QDitw7@<=!UaE0kOXECUB+Cu5q2GR zV)~FC%~BGz?AgtBw^508gj)x#PXt=g;O6qUr&gIF_RD18tiK|~cMMwV`PYQd`2r|sV?^5S+D2n>Wz{cLf4 zk`6qx?skXR?(+-er~4`;I<<)A=H~XAAikjh*+(Llf${!$z)<$hiZ4nb%E3lep?DV# zjYR5;wRO9EC~zr$F_}Z%)p2uDkK5I`s(79>yQVxrNlNPI?%u$7qM=}Z&9A4b8X*p6 zfW!@VGYM%aoO+HN*>R;oWq^>BH0ymON@HH0L~=v?+UzO(`rWb{ zsYZ!jI7T$DHx5Ix9wrZ7&Pnj;QymykzBt$uFsd-e8~ z5P=BZvk``gKyZ{B^8+|%II*yP0I@$$P#?wO$;4*&cXxN^cB*+SM({iL&V@AgL>||J z-k&&Yy>EqVZkiYiNt~II)x_7D5ez-**i+w#izKV?WObip-}0s0xN%v6?#}mJI~F@* zy5>OM-p`Ui&9TCg<#S8m(Ef63fK-6AQTfu}lM-YCxYwRS$yIl|JXSpv>izP}wvdE?d_h+u3`Xd_M2-8Wv-Xe&Go^(0 zr_RaF)gbUaY_GAWz29psvh8e1_rujesY+7_T5#-BRMVpXkMc0al&PcR9Bz19duof^ z@5mWr3!8}b_0K{#+goCbKkcV_dyQv850McF<_P^|QhJSbs3_k2PH64&&CRXstk84H z<(ev89xrIKdYb&+B+Vo~_?loH#=IW`&IP*Laq{`y2m$BgSG&Bs^7GLp{hwcmXx{Fq z{27`yXc8T>SWw+o!Em@$0Y?4r@VEk$V&eQ~& z7%v%s(rKvxMZ&n^)JP6;G7{u6w;KKnHh7OPGnUnM95(# zla-`t!??eqmg#cW2%Ezvk7O`S<6gz1zknMBD>%f2id9?kN!aHyZvyi8_=NQG`%mmJ z-9632aGtBeFnwt=<|@o^D^>U&7C?om#VH;;)B%?=SrBpCilT)6=1_1T`~S0l;swOO*G1 zGj+j*M`5p1@UkkM$I8UtX^T^PYxIgxR%$^|gt&S(ZFy9(s>2@xg5WkrX((BR-h{ph z$QOJmk(E`%_4T_y->@@U`Y{VV2e(6u(9LxZcKU`5t2y_FbBGLeuhM&HWC*h)I89pz zJA)0u7U+VyIOAYIZmt0>kB>(sq_TaPjHZ|2rGM=QQ1N_hp%> zezE1a(p}CTjY{$O3r5wGigq3hzU?1VMa4NiJ^ZjuHf^iFUZ@CYq#!|ZSb-rHR4nQe zA)pBsOJ==CC4f43SFwJkV1+7ileV~fy(^uD_{1y$A9J>_M^eKndvBi%}h(oq+X@5giQTDQyzVe~vn6neheD%Xln zfDk?DR(?~2Uyk;wDpzL)CofL$YH8ow@3AcR9l9$;H0TlSE;BLEqyA7<)m7`IF-hg+ z0`t4Eyssb01nLV@3uy|w8q(|NZwnHcNFIo(A5-HJ5@TmR9e@66&7A48`-REd$GjC>!Jv2w{MS%B)l~e3pW){2nsvquz?q(z zLkdI*C)somy5n8*Wo(H4H8}8BNyMYqELEZknBClPY})l?F89K$s-b32&rES;2o4QJ z)V(TCWPM>q{o4~%B0V%TJUMb1GF66-ZQZ4yQS{SSU@hbFW_QVvlA9Wa($oaBcT#V- z;pu;w>6_$R{OKwp^rJfNfuS%laYI8>*ZADspGfW@3e+FTew#fA*hz_q)>8#Sw#9N> z{uH(*o@GZkpfzCZSahD0fH+!EYjq_IG?O;L8E++f6BTSrNJ#hr4o06l16V^nG-y$x zf~xd{c=8Xwot<1yHP&^$`r{IJYiE#y?}1VK4&l4y?v*dsWO_uWUY@!{z7IP|7r+I) zv7*EWgJfYO3*zOv8~#APmJdtcj^neIqpaOVy6oII@xSfY9Zl7*5AH00CWbqFOT+I? zJe38(4Fu*eDSSWlgTKRKd{eMv>*<6lPz5)4XmF?}S!~H$h#=bR#2xB(EDgeHEPg~s z5-%9*-Pj$HCz3A>&d%i&LarsxCbcUPm9l{Sj#xaw#2pIBg+9LF&Ytw$r!E$EwGbG? 
zQO{B7e&?WgYbEjwZ7`0efe234YJBr2Ll^Y$+yv4FLL(#PmS}n5@F4uWk$^S;b-6pc z1*=uDuHb!rK?_0ybp#>pc1*3R(@Rj8=T>GTJn{B!?V)LOcTE#PP)~7bl7=qJegYtP zy7G=9wY~M7Bib zH4DmXH(Ij}Oi#}u2BIiLG*hB}x*6dVt3Xb@_IW$0*K9~2Gftoxl<-5^=CY>cMFx7* zz+UEB)@}$&^PhJUS(_jY4P2905}Xf8KwHgMAzq)6rRGU8#QONDYW8@nxz&7@i6}A_ zPY=N4E!xTo@@v9J*Hpx%f2UwF`f&e*6f{yMVQY>KId}zX76}8<_ambt!S98%KQT^; zLDN+&7w+Y{`3Y~HAL}~e;JW^6mkoS&5*QCtIX*`oP*ug-Vpci1xjBmi+C!2H&{U=$ z8Iy%|e@H+hU|hn^@{b+X+1Lb~b^VhJ>U%euPn(dAW=p$|O3vDC)se3qD})c(7nvY# z3ra!Ki;C%WSy7r&9-Uv5w!XVFly?TqtZh6s!ON>HK|ZBPt>+fmZ-V6bV@}s%W{O-U zX6J%38(?;NVd(ze^y+VkZ8(;oD5zj@D4q0-SMpm;%|a^GgHx--wmVV(_Dh-u*NW84 zlw(8E7*#91w^-6AF8cK;pEv9WPBr5LzY3ylYSq$2xo~SwMa0h-g@SWU+4?%sn+};~ z6nf{Iosl&1c|VVTCiOg2_+2vR?>lO3H-{gsPjD8T53M| zkro6jMOyxurB0Cliq|b7*00|P0c`aJ0zjLghxTUy{A0!=sz2nZmyIRhTA4*{=$4jIT@ z$ssOM_r(l2wOA4=K)_}8a;;w-;UKR7HWVX*#!#x;_G{2{F{H{;Q%zIaMkkdl`Km2F zS5{e{+6~iN54gQ`z>Yu=^C@IG5m8NT3?pKuphQM;1(Lg<#uDao+ALEDK1xV$q9>&l|v$(l@;}t)I-?i6W zTDK21VKU1cLddHqmX#bVD3aU?_Un4xbA4a84rCG1^uFp8e3=)(~ewojrOJCUtZ)b{iahWFifl2*hBvid!?ryculwc>0wDP3)ACJv_8yq{Fv;%`)wZ zQXtR)Twsy*x+^FYEh~RV17pbRiJ{Z9`HLUq(>W?Lz1FRFrI^Z0x`{W`l7E|#5wv~^ zYb&%4j$4!YsZ{^l2v1jjL`~WcJ`LZ-4hC*=%2<755s2~YJSfPCLouiAeZ4U2~ ziHQT)ko2wed5{xxizr}*dF;AhX$0jz;s#j3neK*-lNb|C z=sOU!%27&mC_6o^I1)AHyXt}w-9I`TOFw1RIe;>;bm-1|?(CPwiEarlon78U-lY{G+`<@Ocxyf;~ zzuurHwtcLsxRQW57Ft=i`UT@+?E5R3GVGV*UhF{tO9k#6CGQ~mIn{AIi}o-i zf9QdEzPdY0W;acplJEFAxmV#I)=DbC$BXg+MXl06Tl#W)x(LJR?A*}UcvGw;F`MZ3 zdV>etzJ2hkHj7^%z|M=2_~N8JC8Dgax$Jz_s#WPk4e&Suc7_uX#CAXXGd0eZ=ri6+ z=HE@Sv!w;TUh=1Rz*Yg%i5N^Rz&xv9EAI~d4&FoQN!h2pjpO>5iZ!40*QX1>og&sA z>GaDk?vo_q79A7Vi(J z0Aop|-92lCr444RL?uIFdvVbgWzO)4a`j<___n0ssvX}38l{`R}naj!M_@~49M`5f;=o? zYTatKAVcaP5GctsDl_ouUzxYGcOFH+S>f~K%GBs5Vj&?VR@m)I)RhQ~~l%Ge^l>)Qe52m+#?&^Pws?jH(|B-ORNnFBO5L4TFVTKh4tO zWUHtVda5EK(@KkOzE7`6jUKoB(Alj_Ox-xhm+Ve5D7iydR3_P)Dk3Bl#LnM=F}wIt z84qfzHK<+@dba@cr}{_Zv2^W*m0_q*vAjl$^v63aA!j{gQeDpE8eZN+-t2}7M8(N! zaD6w7cg;52@mXeMvk;T9H<*t`d}x;S+{7Vn>O-E~=wNRKsbAFhN6&2exZ&DN|EGVg z1W?|(0LAHE`vc|~YcXV;6K%cHx?q9|azG4F9k<5&-}=Uw@PryXStyW~q#s%knIGma zUdgRiIbp<4w4vobt^5q%xqAsKO<-x*iWFprGmAT_?T+58>+q0Ubo)OtCECc9bY5ta7tCQO3AAgFKkbG8lo?SY#JL-$BFw0RKWo!<3eeaYvkbn0@iS`av01VpzX{3vw1Fc^mN`w$IEA>xzWD!$)v~=*?_<{vgSsvQp%Dklk^0M z7n(a=ry1Wdfw9LJ#jLlqZ__jybamc=&Pa(b%T0z`OLow{cAmEA?#)JxP9lXFMkel~ zz<((_tTM1T*3~6XB#?p1ofYmbG)0iO$RSGf+Ax52wb0r#*j(Avf$3ZrBNbKpzGg-z zuHwL1D9I-Fq1$`iM5cX##05WDr%UREYxN$)I^xhueCMg#xouJpbQx(Ok==iw0X0K{ z=oRr(x(P~rDjtb}P!x9?G0azf*?QJ~OG1Xp^k30M!;a$aZa}IQGVnlxmBAJAKOn(` z7o?FP%52A^w?k(7=*gJbxB{!wHKZ%31p(b>tW}i9<&T4~MfCUkdyT2lA#!rVpluLQ zpqLnpEjA$ZkRpmm2MIW_etxC&I$8mPwfF(C8xP`@3J5FxF@z9kI5fnd`@1zC&4Z(l zK;I%f`WXjeeW|b4|C_1uOHr{p*B@J{1k9ed!un4X$Aqci;JtiM1ufWTnA3**nVL%4Y z=n>4Q&g%*r^I?FdN!-+wvOpn?M&47$nBr8(ev;0OQ)8~}mZm~ss{_qVa zt>&(FUS?brtMtgH9#E6i+;ntwh8BM=GBWkixsxH}Xw1&STb|W%GBP51OReP#QLwiQ zn&0;(8HkCQP|!q?vLeIQHoKp$`d@~X=r^a$-u}By)9|G_R~5EKOGxl}x)blB0p&_Z zA9AfzqWt=Otg`yotAPZZ?Ax5zfDha_ig``pCAwXsT5kkgo_Mb98R0;xt8{`;7jN>; zTovo_e{Zshn)_f8qG4A*c~@9rjA#+^Fbg5SFHxvAK1ryPpkTVL?5jK>Ob%MC zFtzv5F(A237l_6wd6}J^>j#DfVretobNXRSSWxeT^@ucrTF438-=5*>5)*Sf+=WEa zu>#(yF^5e$HS*a_M~%wiZQ<2|-R))ByrZd=rLhpFP`8TPHBGY?r8FQy7O zkki<3e`F~9lG9BwujOmJ05X;jAmfh2uu7bjWT>n&sUa|?EgS!bske%XtLvI@gS$iH zF2N zTdua2H}OygoMf+WFbELK6Ybs>b~xta#is8gDqVyOt(jPUEk(*<{QuCA&{Je3mX2j! 
zRg~nhKp$+(?0%1x^gUP_i}fMwsYpq|(y3&@VqhdVrKi7vz(H=UNhZ+bInfos7xXJ7aZr(YOT;mZy8aYG`c1E%I^xH&7) z3B37|;d!(MN{!R%`ca>gL}Hm#OdQQ=M7lYNpq2-9yL}Ow_qh89CP>l*j?0gV7fG)U^|08`>|QnmsaPm4;r#P`HfvLPQ_(C1qmBPAAX`SgNF* zTxq!+Bwa6DocT`diz;Zaf}EG50xZ=R^)h=tc+&sW3bh&}JWY!qJZNa$yNv4r)Nygz zK~wDH&k8Z7X4OHG+dXkWd)bKf=Aiu3CSLWaFOCvjw{dHZwRlWFx5Oqc4Z9B)#` zxA^*mHA|W_qJZNAR4Y$ph>%VR-Jqhjacmv4xn_iI)dGg`@pLv>GxhHjAClp0om0^) zL-KO{mDLShtnV)7bc~MvTFS`&(;z8zYmQD$1!}Fytn~uuu%gCt7m+bDPrj*K9( zPEPt97J}IynpfIrv(U6%GghGz`AGW4Os(g)a@Nf$V)`%YME(I1Yj{;Ap;31LE7f9{ zUJ>*wS6OP`HS$ZVC1>DrJ)T!JOUb4`YqjV}g{vSivyi3YfcKO}Fgt{AR@GG}Of1X-V~Ly6CQ$bO~Y zUr_k+^5Ra9>BJ<7t?0B}{@6>^jl`C$A;M?9in0Q;kF4JRTtbsyJ_=%qh@9kh$1%@LKy zgRZ|zK4#QBIr2!1-bn!!6GS6<0R)_me=mobWKC|iNeOo7Ec3_Jn$6)vHmqWvP$UZ5 z?|3^r8N*`w4M@lmr_)Y%hU0sa&?ZI~T%1w*VUhmYRWF^g)ZH6MK<5SS6aE)bh5%^< zFD?#MIAHO~^2P-z3`UHqRuOvR%L_cNuvb2QRv>wM8z!#EWRF)e~C9`2Db9EO3iwU;}dh&L#6_!d#a_2H)}9(tKVQY^6!V=JNaNswZK)rz0@OuyK6jMc^>w z?>Zm5JZg`*&ybV7Jvl;7{D(LWsk39_%l#u&R&=H);OsCD4CI(#sfH!5Ga%^_Xhiuo zKy8kbOosKZEv6&%eIswTM8~LC$2f>Zsz2}`*BcWX8+ym+wy@eE(Iq=GGs<#2Z7UQ* z1anp2VWnQ$MbCxX6(TyY1>igtbA$-XrgC~URD{F*E;@k%({n~z_M{RnxRv73**sxC z7ohOjoP+q%XneUH@Xd`qFSV>QwkA`kWiZd!s;;kn_!bIPe@~~DUrkn#2L}&t21opu z4EHz(*yH~1L!8UD&JO^|RImMeeLC1Qb6gXUIVi{F3J*+Z$`!5S7=t8nJl?N3oBke? zPyq(uuK#rSu8^3163Y_n2^YRKvB@_a@fE=jZ+~$I@~Xw%9a-bgt3$_iKN7Uuh_8M| ziGEv&;?_r|@Nx-rZWR<<-NZLT50EZsJufJQE4v~~BoPkXy#~#G%oBGgA*F)8UC}-j zs7AZ2bL^IQLq9up_{F}eM6a3>USgS#+~0|$WVP`6TuTux{Ir`KqD38LR;dXZk;rPc z5_#MV4PjS?W`-Ln_AYX8SZYQ)n3>r+JBvdg^AW?m)lxF}#LkZCk1qOpI}_6LkBzB& zX=y2huRL~uR4~H)>S}ss2IrVSkAG&PQgc{twghl2fshC-UgZMQ2%Y`Dev*LJ&4`l9ZGc7WVxs*R4dl)a#HvVq`xgq26sXAyIdP{|*Bn)M=`dbtKIe8ZaRoSKXJzr2bc~P) zzdp8Av@TPpOENQLQco=6#74v6)_;kbuu;Y262BWT{TeY3nrXyv1z;y-wqt4-m#ODf zW!=qjScxS^Ep1GTW#+gzF7#~*Dp)~PDXOl*(>>V+@*^uqDgm!G<*$ba$BnJ=wyHMso_S+0x{W zoXLj06Y=iMRfJkq)bZoe&ZxH%Vxz@JlD!~47<%lYU z_x?|;gi-L~YdeIc0h&7+;ncJo)y6JTOdvA5|7ha;?vzLAGO5I@!7BB5kXUihiq;HGN)@+@r5xI>p2WMu1> zmIhVh^4IQw6xV&Rs!v+Tz}TUhGg~z#m1PvVrY6*oT+KsU#v%rv7Cvi z-3d$_i8LlkIOb?-6sP~QUW@ zQf)^abS7L$xs8)8H*(U#_GX&sRsX12N)wKY^fjGk-9}ed7o#)tKw)cxT&0s(*>Y_M z79o=BV!jr`Bk38AZks|3a-!@_sZm>rrety|R8x#)$p34Z|KI(?SVM#9Wre&vlh4#f zjPpAR3l^Cr^zTWbma3zE=kLVFEslfrzJk6M4BlLrX+FVvuInV zKO=smw%7SCx&BDwHFmmVDu55|5%b|P68m+7`M6?NFK@acn@$-rAc(29pSk#W$5Z|8 z&c^csL8{`>5v#5;h+jouQi@ZeXJu9&W4V2ek|1DZWjub67t&)*4q92yfTbTG)IyA% zBgK7MO)k<>+0UjjG|;q@R&zS)yxsLXm%j=h@DEw2(=I>#`>dq}jCtNYC+r6@hri(r zkT+>QeG)rc!ii-2DQ#G$)d+X=(LAZV6o%p1z@uSKcRlR1aVC(X zko;g+B0~{36rv@9=87sVNOQUwG*{!u(Udq$D%4k61A=`Z(=sV0%l*)E&u|PO5JmD< ztp6rO2_4h8$z@J;9wY)U$;{2#=8Z|{V=|=BzgemT_>y&H6MAm-@*te4Vs<8|!|cvs zu~B`i;pSK-R8GHzl6Zuih{1n?|0UWfz!vKLaEdPfNN1kWus;}CNM)!$AjE^>y>S3F z6Y~})Jsm|7>f6FER_xgL@e`y!@>#}g5!3uoj&{1CWjHZIOfldM6nmG7c+NJko|M5m z1@o1N^p(aPV7|8r(BvpcX=TGP9JrY-{$3+(u2yt21-nzzODULsT8K&hsyrp8?}7I; zNJubUB)FGZ*`^Dui?BU4oKY7>d-8UvG8|UBpB8=6 zxC5KlaWdeCd!Uhj{x8u2o?&@B>|DSSA09;Oa%ww!L5`uQ^OR6+rM(?UXLggu z#_<8nFi|om{GKqJKB(j&ksN`Dq(`Bfg*7hC+@UAzI*Z3ubw~}DEQfr)6lNi!60&@~ z>awI%+>VP8zk1i@UcVcgCecm6bZnh0uXooWW-*gOiA0$( z(jsHc*jO^eZHl4SQYxX%+J-^$^nC8or9bdgeq84I3FpyqPxO(FRRc39T))mf{-uS8 z+)*g9^T>B(WU6O<&Hi$7S&~o7C`j$aiC!R_T&H><0X(&BVMS-Q)4X$|1i`U4OMGrKK)>ye7pZiDv}vsg|;aFv}5N+1dYwSf#g<%~AYW z2ruMlFO_xEgsp4lxmEHgtXlKk6hJRyqj2q=UL+thGBO}I$0i)Z_A+0nfaZxj`n)TVU(7; zH++=GZd*8Lv>fd)$W=G~dZ6?1LX*rcyA6^v!xdJNOFD7K-R82w)mr#f`icZML}acm zmlxM7)=_n}BVJ`S3ii%XoC6qaMCS%o_B`ns z8Kw2i6%ejUp=bwbLoUDma;-rW*smcV+(ZyHchp^aGSM`l2w`cYFm=c9)!=rEVNxx* zR9dBOq_}zXdU0z5dF4T65P;$s@6B|Kxb5`F_0k~tqWRSkabVGQI^5(X 
zRpr{o@Pq^=K!ODWZyNRCWzq*!J+&56kcJknur-FK;52zj$@4esu^5r*xwUKYP%7_E z9T*ubG)bIY+Ly?HeZngk$yYl8>&uJiXjnc{Teh(Gj}|7p=l8z1mR&om?{jW%MXRRs zXwIuN1Ab#Y9t8%LU0r#s6X%j_vcCX+ew6}F$VW(Gzk-x#ljh3x3jhhqpc$oB_i0IK z>6`WEr(5j+h+cP6IQmm6M-&R!8oEdzi03FZ1-jp$5 zH^E7a66qVgeEC&%d|;yBD}}bC|m75;w_DVP~2=kAc95J_Nd4%>TRM zq{PHLz`)ubFt7$^C9j3!Dg&OT!0a;h0S4}!4c3RMli{ynRX;`3&j3ei>l^lo@E?(w ze;WPb1NmaBZvzl%1-ZHLTK~kv>$I|1lOn(IvU*X?{E9^cc~cjUW%pZ)XaPh5b_ z`*pGWJm9w3;G2#5ycBC`8O$ARm+AkG_Tp*)a6oGQxM4e8%<%ekAron~kas0M+uL%$ zf#JyRgJ{&c(qDi0;zH`Ls|SAF2uBWw`Rl(sFtIS;{`&ZV;{O@|bg48y=o3axYr+5Pz#XbZMMbuoU7>e34eb>X5yryE-8BwXNvIiw@(Le>ZW%9*csa3JAUux#)mH zMMuA8L731bS|M+n|Eqx=EM=vo>M}B+nJOimTwHjhq$h@OxorQvtP1$QujT)70TcZK zj9&JqQMVqDCYh{O1JFV0^6t+E^cOGA;QzXJ6a9^bV`bkVMm&yQjXoS1o_rKDcgE%O z*4pTHbd-oMHtOa6x^MnQ;#==TvTt)``{0LHqOD!eQqBCoAHm_xUEP4JUc9*QWPhCK zz9s(CmrHXQ~05Bdb2v(`PAX@M?Htpc3k01T~>*MHU~>1 zyF|B(rMxmBO`eS>>8SCf^QD)uoFUTmJ;b?8-I-9wFT#PU4@#qvaGFB&|_fpiYwC*lB3Kh+>ywGT0_!zdk3)h18 z_YLrf+)@42%?I0=CF}F!skjV2C+svK2!r773eVSL`ufmYyZ&sdEMb3H%ijB|!Ai45 zB#^d3yNSLM;-biQz?D~|fy&uzb^)EXj;jG38rs$1ngS8dnTDA~!bqQrf7@30(h9#u z0g`X`TA5~2mSD{fyb9s0LJnI>sK8wY1|G$1o5;SUN`%&j?&3nz(kPTq(A$=x-Q)UW zm-i{qGDdA5cH=Z@0XcclG`u&u3!5ld!UxQ@r8>zdAKjfR|rJ$bOd z_6u}i|J?FBesB8}g*e> z$e{Tn!>Vr4H?@z@A()48wUpX$eKtIYRP8WS*0TvVr^-;_%&M$rRL}aS78Spn zNWjzjg9_ib->goJGd_oCcs{Amw)Ysj6!hxankAVcLKgpDW9J^u1mDJSVw6Wup5z!6 z-W)^3oI(<1&WEkSP&uqEa|#)w^q@x$3CV<&!ZsT@Ef$Mvie-t-vsqM3b6UcXRT@R$5|N zaq6z%SZm+y(Ow={)slNt3buiy+#INn^B_Qr0&3RpKWJlJ!D%)evHs-qnq=xrt~gbM z@dYO?J_dmpxOSqQLN{pZTrkK+lKMEQrG!}TeG8^>04RC1lO5yR_Q#!(`oEX&l+=*S)%JjS5+)#!|xMxROTR4MEZ zoZqj%S$#@Kw00>MU86>l13YkMS>a9bvBeZ+U7tooLvvDqo^xm9M=Oi7Eo|8WFS0X& zJPi&(4;4lU3w1dyP(av|Wc#kk_Gy1!aJDBEpW77u@%O?2WFA?yZG#?FYB^H9I3;k4 z#QoszDnzZOsonsoSa_b#chpu(x}Kg>QZqfBSg==K+k48Wjr;{F#~wAglGGesHKvPL z^H9p%sAl^87K?lOaw%B^mAP$%f?$KH2WH|_0nN|{B8&UmoIn?KJWmXbZ6^2dR_3H_ zGc2uCHV>J^5r|)6W^ArP1aBxNnuyi|2l!y{EIw78LTBIwesC}f1F z47D|OW#>PKjyIU5gkqF4-T6uOi1?zG!;mHVed6iRtp|NM6~cJa-w7Dy4O8t`-2)*# zu29)}ZCwjjr3t4@>74i1J0`m6D%Ht$@=AVglb<{@1l_0;-Z7dJ!%?yY$70IlZ3>?R zm)Oy!a=;>})L-2M^~OjxQK~`Ns_m9U!`U|gc)TkM1CO!f##DL4tYd*E# zj1VUmVQFa+Kbj@p4<=CPVy=!#RDW-z@!PRD!m4pzM#oV62G4Fd{F2;$HlBu}sKkwQ z`UThR?#}|a!~KKvLGxtTySrzF`Q?Uvv^LZFbapVXBuPVbVc+)lScs}zK4MMbU&!W% z<+BeJ_lHf6Pz#9zx3E)glP*pzsN{~Dpj;`)bwjez?}`ss9iQlOxFpVR;MTt{dXK+k zO#CRWkW+x?b(>+3ARd52{r(fMcy4P#ic@y_~PnBNxc!>kdseeIX{%V9z z)g%JWgvTcgr=*slY}~y+Xz-AD){hu)D;Md#vJfU*%gW1aET(LziU=a1(5-Wr2J-^$ zvV54#{?vlw8=mh{G~IJuO0g74E$x9QH$X%iX(Ymmgc(0j$q*r_ZJU8jucDQk`xp-f z3$EL7d_+>Zo4Lkb(lt6VyAo-hoVvk#`&N11ACD39lUaW#W7hA_l_hsK2%N0fJE@oZ zxc(up#Nz?)pe}vgNwb&r{g-RTdlS53%90`TK7I#0AF4m1yJxN86# zYDVL6;lgadd+#KdpON~7D&vqdENicqSD}i+!$bYqW`8CEw9PF8bZ|IO)~-V4LXk$R zVoj#vothguQBq_KsnkcA4s__M(vE$9g0ZMpzeb!9x`%+}KcD2NoK?RVb~Dp< z<%GkHVr7e?UTXJ54p+V+e7-$uIrRRKp4=;B6C?@mo(m@6h*n2|_uHW3;c#T!n}jCi zHdHR~$Pm^Ygcz8dMP9jMsvGcqeFqQQqq6M#D@Uky%iO~@-Y}{^?~-y@TH?Cj=!~)9 z$kTwHB7?!qLOmg^HW?q-7UOAuAo}TBMSEa8j7b%dUSC?sz?WBwv3GK~pyNQ~wvr8+xLbJT# zjpTXmp%|akyXud}TT!OKrs|h+*t#*JZj1@2I6k-U)&Pusq^#|J=RVaCzVaXk3HC=} z_R31==v$nmEozzqf!8)n2u!3WmoI5D^)O1A&z3aP5((UMbLdy1-@>C34tYJ}z_|lY z4z4flKx_13(>bAR{bADQNC1F)?)<0+V&*W@q_HRHHe|C}=KRLRtPG01kjcbGe|t}8x!_doFWXgeBj3HSfTyq_Zf bc4kvDMSGb)o70fERf{+|xSb-M3`+ekk&ZMZ literal 0 HcmV?d00001 From be5f38b5dfc9ab182f29d082d9dc171233907a7f Mon Sep 17 00:00:00 2001 From: Di Wu Date: Mon, 19 Jul 2021 00:48:42 +0900 Subject: [PATCH 42/63] debug --- docs/en_US/Compression/Pruner.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst 
From f193106502f7cbc0bd16d8fbbca1bb0bee8c7fdb Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Mon, 19 Jul 2021 14:56:17 +0900
Subject: [PATCH 43/63] debug

---
 .../pytorch/pruning/transformer_pruning_head_masker.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py
index 951b0631e8..9123cbbdbb 100644
--- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py
+++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py
@@ -143,8 +143,8 @@ def _get_layer_masks_from_head_mask(self, weight_group, head_mask_bool, device=N
         mask_weight = head_mask_bool.unsqueeze(-1).expand(weight_mask_shape).type_as(q_proj.module.weight)
         mask_bias = head_mask_bool.unsqueeze(-1).expand(bias_mask_shape).type_as(q_proj.module.weight)
-        mask_weight_proj = mask_weight.view(q_proj.module.weight.size()).detach().to(device)
-        mask_bias_proj = mask_bias.view(-1).detach().to(device)
+        mask_weight_proj = mask_weight.contiguous().view(q_proj.module.weight.size()).detach().to(device)
+        mask_bias_proj = mask_bias.contiguous().view(-1).detach().to(device)
         masks_for_proj = {'weight_mask': mask_weight_proj.detach()}
         if hasattr(q_proj.module, 'bias') and q_proj.module.bias is not None:
             masks_for_proj['bias_mask'] = mask_bias_proj
@@ -435,4 +435,4 @@ def grad_hook(md, grad_in, grad_out):
         self.backward_hooks[output_proj.group_idx] = handle
 
     def get_mask(self, num_prune, weight_group, **kwargs):
-        return self.get_mask_by_importance_ranking(num_prune, weight_group)
\ No newline at end of file
+        return self.get_mask_by_importance_ranking(num_prune, weight_group)

From b9837e841fb4da11d9368b47016778bb20dd67e5 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Mon, 19 Jul 2021 16:03:27 +0900
Subject: [PATCH 44/63] unit test

---
 .../sdk/models/pytorch_models/transformer.py | 190 ++++++++++++++++++
 test/ut/sdk/test_transformer_pruners.py      | 119 +++++++++++
 2 files changed, 309 insertions(+)
 create mode 100644 test/ut/sdk/models/pytorch_models/transformer.py
 create
mode 100644 test/ut/sdk/test_transformer_pruners.py diff --git a/test/ut/sdk/models/pytorch_models/transformer.py b/test/ut/sdk/models/pytorch_models/transformer.py new file mode 100644 index 0000000000..608d4ed93b --- /dev/null +++ b/test/ut/sdk/models/pytorch_models/transformer.py @@ -0,0 +1,190 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math +import copy + + +class PosEncoding(nn.Module): + def __init__(self, hidden_dim, max_seq_len=80): + super().__init__() + self.hidden_dim = hidden_dim + + pe = torch.zeros(max_seq_len, hidden_dim) + for pos in range(max_seq_len): + for i in range(0, hidden_dim, 2): + pe[pos, i] = math.sin(pos / (10000 ** ((2 * i) / hidden_dim))) + pe[pos, i + 1] = math.cos(pos / (10000 ** ((2 * (i + 1)) / hidden_dim))) + + pe = pe.unsqueeze(0) + self.register_buffer('pe', pe) + + def forward(self, x): + x = x * math.sqrt(self.hidden_dim) + x = x + torch.autograd.Variable(self.pe[:, :x.size(1)], requires_grad=False) + return x + + +def attention(query, key, value, mask=None, dropout=None): + d_k = query.size(-1) + logits = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) + if mask is not None: + logits = logits.masked_fill(mask == 0, -1e9) + attention_map = F.softmax(logits, dim=-1) + if dropout is not None: + attention_map = dropout(attention_map) + return torch.matmul(attention_map, value) + + +class MultiHeadAttention(nn.Module): + def __init__(self, hidden_dim, n_heads, dropout=0.1): + super().__init__() + + self.hidden_dim = hidden_dim + self.head_dim = hidden_dim // n_heads + self.n_heads = n_heads + + self.q_proj = nn.Linear(hidden_dim, hidden_dim) + self.v_proj = nn.Linear(hidden_dim, hidden_dim) + self.k_proj = nn.Linear(hidden_dim, hidden_dim) + self.dropout = nn.Dropout(dropout) + self.output_proj = nn.Linear(hidden_dim, hidden_dim) + + def forward(self, query, key, value, mask=None): + batch_size = query.size(0) + + # project and reshaping + k_project = self.k_proj(key) + q_project = self.q_proj(query) + v_project = self.v_proj(value) + k_reshape = k_project.view(batch_size, -1, self.n_heads, self.head_dim).transpose(1, 2) + q_reshape = q_project.view(batch_size, -1, self.n_heads, self.head_dim).transpose(1, 2) + v_reshape = v_project.view(batch_size, -1, self.n_heads, self.head_dim).transpose(1, 2) + + # merge heads and output + scores = attention(q_reshape, k_reshape, v_reshape, mask, self.dropout) + scores = scores.transpose(1, 2).contiguous() + scores = scores.view(batch_size, -1, self.hidden_dim) + + return self.output_proj(scores) + + +class FeedForwardLayer(nn.Module): + def __init__(self, hidden_dim, intermediate_dim=2048, dropout=0.1): + super().__init__() + self.dense1 = nn.Linear(hidden_dim, intermediate_dim) + self.dense2 = nn.Linear(intermediate_dim, hidden_dim) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + return self.dense2(self.dropout(F.relu(self.dense1(x)))) + + +class LayerNorm(nn.Module): + def __init__(self, hidden_dim, eps=1e-6): + super(LayerNorm, self).__init__() + + self.alpha = nn.Parameter(torch.ones(hidden_dim)) + self.beta = nn.Parameter(torch.zeros(hidden_dim)) + self.eps = eps + + def forward(self, x): + mean = x.mean(-1, keepdim=True) + std = x.std(-1, keepdim=True) + return self.alpha * (x - mean) / (std + self.eps) + self.beta + + +class TransformerEncoderLayer(nn.Module): + def __init__(self, n_heads, hidden_dim, dropout=0.1): + super().__init__() + + self.self_attn = 
MultiHeadAttention(hidden_dim, n_heads) + self.ff_layer = FeedForwardLayer(hidden_dim) + + self.norm1 = LayerNorm(hidden_dim) + self.dropout1 = nn.Dropout(dropout) + self.norm2 = LayerNorm(hidden_dim) + self.dropout2 = nn.Dropout(dropout) + + def forward(self, inp, mask): + x = self.norm1(inp) + x = inp + self.dropout1(self.self_attn(x, x, x, mask)) + x = x + self.dropout2(self.ff_layer(self.norm2(x))) + return x + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, n_heads, hidden_dim, dropout=0.1): + super().__init__() + + self.self_attn = MultiHeadAttention(hidden_dim, n_heads) + self.cross_attn = MultiHeadAttention(hidden_dim, n_heads) + self.ff = FeedForwardLayer(hidden_dim) + + self.norm1 = LayerNorm(hidden_dim) + self.norm2 = LayerNorm(hidden_dim) + self.norm3 = LayerNorm(hidden_dim) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + def forward(self, inp, mask, encoder_output, encoder_output_mask): + x = self.norm1(inp) + x = inp + self.dropout1(self.self_attn(x, x, x, mask)) + x = x + self.dropout2(self.cross_attn(self.norm2(x), encoder_output, encoder_output, encoder_output_mask)) + x = x + self.dropout3(self.ff(self.norm3(x))) + return x + + +class TransformerEncoder(nn.Module): + def __init__(self, vocab_size, n_layers, hidden_dim, n_heads): + super().__init__() + + self.n_layers = n_layers + self.embedding = nn.Embedding(vocab_size, hidden_dim) + self.posencoding = PosEncoding(hidden_dim) + self.layers = nn.ModuleList([copy.deepcopy(TransformerEncoderLayer(n_heads, hidden_dim)) for _ in range(n_layers)]) + self.layernorm = LayerNorm(hidden_dim) + + def forward(self, src, mask): + x = self.embedding(src) + x = self.posencoding(x) + for i in range(self.n_layers): + x = self.layers[i](x, mask) + return self.layernorm(x) + + +class TransformerDecoder(nn.Module): + def __init__(self, vocab_size, n_layers, hidden_dim, n_heads): + super().__init__() + + self.n_layers = n_layers + self.embedding = nn.Embedding(vocab_size, hidden_dim) + self.posencoding = PosEncoding(hidden_dim) + self.layers = nn.ModuleList([copy.deepcopy(TransformerDecoderLayer(n_heads, hidden_dim)) for _ in range(n_layers)]) + self.layernorm = LayerNorm(hidden_dim) + + def forward(self, inp, mask, encoder_output, encoder_output_mask): + x = self.embedding(inp) + x = self.posencoding(x) + for i in range(self.n_layers): + x = self.layers[i](x, mask, encoder_output, encoder_output_mask) + return self.layernorm(x) + + +class TransformerForSeq2Seq(nn.Module): + def __init__(self, src_vocab_size, tgt_vocab_size, n_layers, hidden_dim, n_heads): + super().__init__() + + self.encoder = TransformerEncoder(src_vocab_size, n_layers, hidden_dim, n_heads) + self.decoder = TransformerDecoder(tgt_vocab_size, n_layers, hidden_dim, n_heads) + self.output_dense = nn.Linear(hidden_dim, tgt_vocab_size) + + def forward(self, src, tgt, src_mask, tgt_mask): + encoder_outputs = self.encoder(src, src_mask) + decoder_outputs = self.decoder(tgt, tgt_mask, encoder_outputs, src_mask) + + return self.output_dense(decoder_outputs) diff --git a/test/ut/sdk/test_transformer_pruners.py b/test/ut/sdk/test_transformer_pruners.py new file mode 100644 index 0000000000..39bc1631a9 --- /dev/null +++ b/test/ut/sdk/test_transformer_pruners.py @@ -0,0 +1,119 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
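+#
+# These tests build a small binary classifier on top of the TransformerEncoder
+# defined in models/pytorch_models/transformer.py, run TransformerHeadPruner
+# under each ranking criterion (with and without global sorting, graph-based
+# weight grouping, and iterative pruning), and check that the produced masks
+# reach the configured head sparsity.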
+ +import os +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.data +import math +import sys +import unittest +from unittest import TestCase, main + +from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner + +from models.pytorch_models.transformer import TransformerEncoder + +sys.path.append(os.path.dirname(__file__)) + + +def validate_sparsity(wrapper, sparsity, bias=False): + masks = [wrapper.weight_mask] + if bias and wrapper.bias_mask is not None: + masks.append(wrapper.bias_mask) + for m in masks: + actual_sparsity = (m == 0).sum().item() / m.numel() + msg = 'actual sparsity: {:.2f}, target sparsity: {:.2f}'.format(actual_sparsity, sparsity) + assert math.isclose(actual_sparsity, sparsity, abs_tol=0.1), msg + + +class Model(nn.Module): + """ + A binary classifier using a transformer encoder for contextual embedding. + """ + def __init__(self, n_layer, hidden_dim, n_head): + super(Model, self).__init__() + self.embedding = TransformerEncoder(vocab_size=100, hidden_dim=hidden_dim, n_layers=n_layer, n_heads=n_head) + self.classifier = nn.Linear(hidden_dim, 1) + + def forward(self, x, mask): + raw_output = self.embedding(x, mask) + pooled_output = raw_output[0] + prediction = F.sigmoid(self.classifier(pooled_output)).squeeze() + return prediction + + +def train(model, dataloader, criterion, optimizer): + model.train() + device = next(model.parameters()).device + + for _ in range(2): + y = torch.ones(10).to(device) + out = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device)) + loss = criterion(out, y) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + +def head_pruner_tests(criterion, global_sort, use_graph, iterative): + print("Testing criterion {} with global_sort={} and use_graph={}".format(criterion, global_sort, use_graph)) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # Build config list and arguments + config_list = [{'sparsity': 0.5, 'op_types': ['Linear']}] + + kwargs = {'ranking_criterion': criterion, 'head_hidden_dim': 64} + if global_sort: + kwargs['global_sort'] = True + else: + kwargs['global_sort'] = False + + if use_graph: + attention_name_groups = list(zip(['embedding.layers.{}.self_attn.q_proj'.format(i) for i in range(6)], + ['embedding.layers.{}.self_attn.k_proj'.format(i) for i in range(6)], + ['embedding.layers.{}.self_attn.v_proj'.format(i) for i in range(6)], + ['embedding.layers.{}.self_attn.output_proj'.format(i) for i in range(6)])) + kwargs['attention_name_groups'] = attention_name_groups + else: + dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device)) + kwargs['dummy_input'] = dummy_input + + if iterative: + kwargs['num_iterations'] = 2 + kwargs['epochs_per_iteration'] = 1 + + n_layers = 6 + n_heads = 8 + hidden_dim = 512 + model = Model(n_layers, hidden_dim, n_heads) + model.to(device) + kwargs['optimizer'] = torch.optim.SGD(model.parameters(), lr=0.001) + + def trainer(model, optimizer, criterion, epoch): + return train(model, None, criterion, optimizer) + kwargs['trainer'] = trainer + kwargs['criterion'] = nn.BCELoss() + + # create pruner and call compress() + pruner = TransformerHeadPruner(model, config_list, **kwargs) + pruner.compress() + + # validate sparsity + if not global_sort: + for wrapper in pruner.modules_wrapper: + validate_sparsity(wrapper, wrapper.config['sparsity']) + + +class PrunerTestCase(TestCase): + def test_head_pruner(self): + for criterion in ["l1_weight", "l2_weight", 
"l1_activation", "l2_activation", "taylorfo"]: + for global_sort in [False, True]: + for use_graph in [False, True]: + for iterative in [False, True]: + head_pruner_tests(criterion, global_sort, use_graph, iterative) + + +if __name__ == '__main__': + main() From 53ab047fc0f7adeec67c5f03a7ab240864d564b9 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Mon, 19 Jul 2021 16:38:22 +0900 Subject: [PATCH 45/63] ut debug --- test/ut/sdk/test_transformer_pruners.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/ut/sdk/test_transformer_pruners.py b/test/ut/sdk/test_transformer_pruners.py index 39bc1631a9..a2b1d227c1 100644 --- a/test/ut/sdk/test_transformer_pruners.py +++ b/test/ut/sdk/test_transformer_pruners.py @@ -13,10 +13,8 @@ from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner -from models.pytorch_models.transformer import TransformerEncoder - sys.path.append(os.path.dirname(__file__)) - +from models.pytorch_models.transformer import TransformerEncoder def validate_sparsity(wrapper, sparsity, bias=False): masks = [wrapper.weight_mask] From 416f6805e89aa5c67cb1cd8477d863a4701e1e0b Mon Sep 17 00:00:00 2001 From: Di Wu Date: Mon, 19 Jul 2021 17:35:26 +0900 Subject: [PATCH 46/63] replace torch.linalg.norm with torch.norm --- .../pruning/transformer_pruning_head_masker.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index 9123cbbdbb..cf6db493b7 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -225,9 +225,9 @@ def get_head_importance_scores(self, weight_group): key_proj_weights = k_proj.module.weight.data.view([n_heads, -1]) value_proj_weights = v_proj.module.weight.data.view([n_heads, -1]) - query_norm_avg = torch.linalg.norm(query_proj_weights, 1, -1) - key_norm_avg = torch.linalg.norm(key_proj_weights, 1, -1) - value_norm_avg = torch.linalg.norm(value_proj_weights, 1, -1) + query_norm_avg = torch.norm(query_proj_weights, 1, -1) + key_norm_avg = torch.norm(key_proj_weights, 1, -1) + value_norm_avg = torch.norm(value_proj_weights, 1, -1) return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() @@ -249,9 +249,9 @@ def get_head_importance_scores(self, weight_group): key_proj_weights = k_proj.module.weight.data.view([n_heads, -1]) value_proj_weights = v_proj.module.weight.data.view([n_heads, -1]) - query_norm_avg = torch.linalg.norm(query_proj_weights, 2, -1) - key_norm_avg = torch.linalg.norm(key_proj_weights, 2, -1) - value_norm_avg = torch.linalg.norm(value_proj_weights, 2, -1) + query_norm_avg = torch.norm(query_proj_weights, 2, -1) + key_norm_avg = torch.norm(key_proj_weights, 2, -1) + value_norm_avg = torch.norm(value_proj_weights, 2, -1) return ((query_norm_avg + key_norm_avg + value_norm_avg) / 3).detach() @@ -346,7 +346,7 @@ def hook(module_, input_, output): raw_activation = input_.detach().cpu() ** 2 n_heads = raw_activation.size(-1) // head_hidden_dim raw_activation = raw_activation.view(raw_activation.size(0), raw_activation.size(1), n_heads, -1) - raw_activation = torch.linalg.norm(raw_activation, 2, -1) # (B, S, n_heads) + raw_activation = torch.norm(raw_activation, 2, -1) # (B, S, n_heads) raw_activation_reduced = torch.sum(raw_activation, [0, 1]) # (n_heads,) 
collected_activation.append(raw_activation_reduced)

From 4d91a1ea3174836d52048717bb3eb7414fba2b04 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Tue, 20 Jul 2021 17:52:32 +0900
Subject: [PATCH 47/63] update ut

---
 test/ut/sdk/test_transformer_pruners.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/test/ut/sdk/test_transformer_pruners.py b/test/ut/sdk/test_transformer_pruners.py
index a2b1d227c1..3f10eff1e1 100644
--- a/test/ut/sdk/test_transformer_pruners.py
+++ b/test/ut/sdk/test_transformer_pruners.py
@@ -98,6 +98,14 @@ def trainer(model, optimizer, criterion, epoch):
     pruner = TransformerHeadPruner(model, config_list, **kwargs)
     pruner.compress()
 
+    # test model and mask export
+    pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device)
+
+    # TODO: test exporting to onnx when we can pass dummy_input instead of input_shape to export_model
+    # dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device))
+    # pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', input_shape=None,
+    #                     dummy_input=dummy_input, device=device)
+
     # validate sparsity
     if not global_sort:
         for wrapper in pruner.modules_wrapper:
@@ -112,6 +120,12 @@ def test_head_pruner(self):
                     for iterative in [False, True]:
                         head_pruner_tests(criterion, global_sort, use_graph, iterative)
 
+        file_paths = ['./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', './search_history.csv',
+                      './search_result.json']
+        for f in file_paths:
+            if os.path.exists(f):
+                os.remove(f)
+

From c0efcd52b26692a46f35dc6c9ab3ba2e2406e9d0 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Mon, 26 Jul 2021 03:51:52 +0000
Subject: [PATCH 48/63] improve docs

---
 docs/en_US/Compression/Pruner.rst          |  14 ++++++++++----
 docs/img/huggingface_bert_architecture.png | Bin 0 -> 89809 bytes
 2 files changed, 10 insertions(+), 4 deletions(-)
 create mode 100644 docs/img/huggingface_bert_architecture.png

diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index cf16fff854..bae2a38b3a 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -735,6 +735,8 @@ Transformer Head Pruner is a tool designed for pruning attention heads from the
 
 Typically, each attention layer in a Transformer model consists of four weights: three projection matrices for query, key, and value, and an output projection matrix. The outputs of the former three matrices contain the projected results for all heads. Normally, the results are then reshaped so that each head performs the attention computation independently. The final results are concatenated back before being fed into the output projection. Therefore, when an attention head is pruned, the weights corresponding to that head in the three projection matrices are pruned, as are the weights in the output projection corresponding to the head's output. In our implementation, we calculate and apply masks to the four matrices together.
 
+Note: currently, the pruner can only handle models with projection weights written as separate `Linear` modules, i.e., it expects four `Linear` modules corresponding to the query, key, value, and output projections. Therefore, in the `config_list`, you should either write `['Linear']` for the `op_types` field, or write names corresponding to `Linear` modules for the `op_names` field.
+
 The pruner implements the following algorithm:
 
 .. code-block:: bash
@@ -754,15 +756,19 @@ Currently, the following head sorting criteria are supported:
 
 We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control this by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list. However, this does not mean that each layer will be pruned to the same sparsity as specified. This sparsity value will be interpreted as a global sparsity, and each layer is likely to have a different sparsity after pruning by global sort.
 
-In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules (usage 1 below) to the pruner, or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 2 below).
-
-However, if you would like to assign different sparsity to each layer, currently you could only use the first option, i.e., passing names of the weights to the pruner (usage 3 below). Also note that weights belong to the same layer must have the same sparsity.
+In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules as the pruner's initialization parameters (usage 1 below), or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 2 below). However, if you would like to assign different sparsity to each layer, you can only use the first option, i.e., passing names of the weights to the pruner (usage 3 below). Also note that weights belonging to the same layer must have the same sparsity.
 
 In addition to the following usage guide, we provide a more detailed example of pruning BERT for tasks from the GLUE benchmark. Please find it in this :githublink:`page <examples/model_compress/pruning/transformers>`.
 
 Usage
 ^^^^^
 
+Suppose we want to prune a BERT model from the Huggingface implementation, which has the following architecture (obtained by calling `print(model)`). Note that we only show the first of the repeated layers in the encoder's `ModuleList`.
+
+.. image:: ../../img/huggingface_bert_architecture.png
+   :target: ../../img/huggingface_bert_architecture.png
+   :alt: 
+
 Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code)
 
 .. code-block:: python
@@ -779,7 +785,7 @@ Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code)
     }
     config_list = [{
         'sparsity': 0.5,
-        'op_types': ["Linear"]
+        'op_types': ["Linear"]
     }]
     pruner = TransformerHeadPruner(model, config_list, **kwargs)
     pruner.compress()
diff --git a/docs/img/huggingface_bert_architecture.png b/docs/img/huggingface_bert_architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..9187c79c2e5c3e498d419343564a6ad440c96f6a
GIT binary patch
literal 89809
[base85-encoded PNG data omitted]
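
Note on the docs change above: the hunk at @@ -779,7 +785,7 @@ shows only the tail of the "Usage 1" example, so the construction of ``kwargs`` is elided from this patch. The sketch below reconstructs what a complete one-shot pruning call could look like for the Huggingface BERT architecture the new docs reference. The kwargs keys mirror the ones exercised by test/ut/sdk/test_transformer_pruners.py earlier in this series; the checkpoint name, optimizer choice, and trainer body are illustrative assumptions rather than part of the patches.

    import torch
    import torch.nn as nn
    from transformers import BertForSequenceClassification
    from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner

    model = BertForSequenceClassification.from_pretrained('bert-base-uncased')

    # Group the q/k/v/output projection Linear modules of each of the 12 layers,
    # following the same (usage 1) grouping convention as the unit test above.
    attention_name_groups = list(zip(['bert.encoder.layer.{}.attention.self.query'.format(i) for i in range(12)],
                                     ['bert.encoder.layer.{}.attention.self.key'.format(i) for i in range(12)],
                                     ['bert.encoder.layer.{}.attention.self.value'.format(i) for i in range(12)],
                                     ['bert.encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)]))

    def trainer(model, optimizer, criterion, epoch):
        # A real finetuning epoch goes here; it is only exercised by the
        # data-driven criteria ("l1_activation", "l2_activation", "taylorfo").
        pass

    kwargs = {
        'ranking_criterion': 'l1_weight',
        'global_sort': False,
        'num_iterations': 1,            # one-shot pruning
        'epochs_per_iteration': 1,
        'head_hidden_dim': 64,          # 768 hidden dims / 12 heads for bert-base
        'attention_name_groups': attention_name_groups,
        'trainer': trainer,
        'optimizer': torch.optim.Adam(model.parameters(), lr=2e-5),
        'criterion': nn.CrossEntropyLoss(),
    }

    config_list = [{
        'sparsity': 0.5,
        'op_types': ["Linear"]
    }]

    pruner = TransformerHeadPruner(model, config_list, **kwargs)
    pruner.compress()

After ``compress()``, the model and masks can be exported exactly as the updated unit test in [PATCH 47/63] does, via ``pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device)``.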
zG$!07@DlTS0ri!eLF9Z@PbepIy7w#aBAPU}8UkbPCVIBwujalwh{hH0x#`aJu3#xj z>yg)Ikog;hL0RxB2_e9b_edwSPVN+LsqulnHB#BR&>MEKDSsM41a({6xR)~a0A0Wf zfzaLi3ESm#d{;P%wEzCldLsvx0v(e+ur)D`H^|GTkoZpj1?KtWhJ+=O@&2GpU;Up2 zeO)f#qdC2L#P?Z8N+ViXHSwikO9tj!!KFR!&&r~L5^y8Qy?+X{^^Suk&2n*z1qQJq zFHY{6Qc*^1yklNq(2-wEDr-*JnLoKl2r*SIt3F_~&TYlybR1(5!T)v6*L7a1*WQA$ zlMr>Z%ki_4O0zMT?fi2R#IfG8z0l6s;!*hcS>?v$5_TD{EI5j)qR1YvmNXOLhX>6G ze_LlBgkWlUTefb)`x+9rsve=G!a?x6iUPj$N{d?P?vgXgLpfpz^jlqB?TV@4%PYtb$DAl_g z68RSKN~2Y$u))=o;d&QY?(jG6e-Ssr;|BX(CsV9v=l-!l`p4;!jGU@2_omAlE6<94 zbI@4Ch~w7r(+k83InLbx+T{PEfMoS5`!I8SBFbd*6$=$cy$iBZI+w4ClbG!i5?=Ye zPgNOi)BmA?%sKpzbrE5c4Ijd^We-Q0GHaw^yMCW?OH$AmLOvs0;+B#2I4Xk+A`Mq^ z78iB|&QHY?!BfH)?OO**=-snnz5oZZs4_*Fohi2$Z=vj4KCA#tezNj?pF~_^r^cX- zgcJWohI85+4|I;pmbEPjl5QDLh*P;uTO`?GUFgIR=5fe`T(AtxPW@iL_x*djY`($m z;*2MumN%IfW_;)}<7|f1MVPwtU}wNoz76tgB}EfElzl!EPROE18+yH%bSv%UME;>L z2;tGu+}B1p#j)PVG%kE`lI_kFC;n`yEqK)(iy%f(bzaP?k3v9`*xK_dl>`^UGK2b& z#$ZqS1G8R@v?)|rkM6H`t+#c-ob4A^t~UVeQtcFOy0~Cmk1Zk`moQTEL{GpllFUr!g&){B+AT$*GGVR!0b!r4+7dg50hzQ9 zK+a%7q_{VSa*CJ^&jNm{ zTcz$Lwg1jhBEhHike`(5r7SBA11F*GgdDpIf6zJU{#EyN%?$A6t* zBKIsdFQdCcQf=jq|3zTX=UA7YT43b;{Bd~;>>nreQ_gT6AAaP_nh{>K`RXnyD?20a z(Zc{T+zko3t%a-m|D|TV?;7w22Y1%^0$2twVUb+&h^nHo@@yqN^Q5wD$1IwAnxRg; zF+ZQM{uKVvoo-OAdB5~Lyzv|L$6iGi;{eK;OENz_>(*AP7*f4niUW?BiKd})ER!9Q zNvKe?$QK~x;24ZPBch3zoNrWdV}P!nFOe|k-?h8LbGhGD=D#Q+ttl|=jRJ(c7plVd z>Y6%tiJ?d1vq(tqagJ*7vYe*X_*+Xn=I1*3`hFSg?~XtYb!1e=n#-L=dZ2QC)wwrb zDVirmJYD?3D{75QX3--6lEh-mYtdjF0mC++aU?0CP0yNl&l_|p3@~8}mo)8>x=$K! z&~&J&jmHaWf5K1|lr#ep#?{F@wFUj0?uA&j)$zaOH}-1|U++c=4?=?Fm-not7Knop z6jQhvKtNZXV%X*`N%&TK43Ry6z8x}|qcB@m+@4iJ=XrRS^?hB6@+nEY;qU`sIYy#U zu4;YIyiE4I%+9YlkL5m6fPGziB|s8nCxG-1(~;ueUK?WIQ+%)S6^BJ|LHL911Yk&_ z5c@9C*4eAtRi5|N@vkBP2?E+;ydtX{+L_aPj~`H6(9@2m?)ApUDAX_r1KaHj51+P& zVe{GzL;-%dTjD?Uu&DKoIav)rYj=TMUyGbR9iN1{3lKkluBBYfz5irnX)|FuRl)@j zLOTezxC-s8gxX{AJF_Q`A&@q8ZkWd01UDBu{Axl)>qBQHkk=S)C0p%Y)5IkrF|b{^WD90#Xs`nvYiTe$Ou3l&E>0D8}#&f<$EIL@;dBOhh`N4Pk8$PqG;c&unMCan~6|wI0evIyg)Lm~| z0m%|MGSo@iugcvii2)_1VD?IO6V?{nmHuIdZ1T8-V4_d3p+O039hy2buJTuELs&!DX-rUDh0r<)DaNmVC$v~z_ z(v%Q}rxcO9rEUK@W{4JzKy*{MF$y7My!G2{`VPxpS2lj-itSGq1yQV@=d$yl~DMq7+I5Lxw(`3ZK2H| zk+k4wtYfmsZMphZy}RF$%L_>Jy)zc{<{s@SthQmpN-R3@xkKBQlUZt*?<&rhFPb2&rKO#XI${SQJ-F@!GZpuzRYt-USykXu%E zwAKg94sV7rwZ@aBb`)eI$TXfx3 z^a0?M(;LvIWQ$YuuNhjS&eLn&BjghkWUht0RbictwF6(?FfE*9Dg}yQDQAjsGZ3zv zvbL~GeBr@i$pM*%#np?$WV%(?DM(lamuiROCGzI9|axKnIK9lnSBy57Xy2_3ehFOdw2jBu6><5KBv>|NXW-YJXn&7vk+xWsP3W-LM zZ)BsJRp^4{-9fLo8N{MC>+kplaQgVKinmqCFzuvP;*4r2S%5V}zhoJ1r_`~~+@JgnY!(!BraXJT@bB;hZ_qa)F<;9w$KSg8vDW2L zHvq5ny%Hc{pmpZx{uyRv)q9&6ern}KklPKF@ueE@+yxa5s|s32gHR+v;9C5|8C(?qVF*ffE>Wp`6to@=?Di`Y3niAD zo=L-owK_(Ta%`Z^#0GbYTpm7MljiAvU^VM& ztVZ#rU0-ZTI#S~6Uh!~eWLt=i?HGKyxh^DZ_zsp}DNqB43m7kdARiG^7Arc_RqOjA zp>lx_S1+)^!-v?x=Wc$EUeb!ds}pZU9}IQT(m6^MPbDswwK*f;^ne57gZ zdTz1pD!%)avVPCLCSm__=}{zow&KTrh>wjP7Z-hNSddQw7{xYbloi{W<%II=8b9<; z*c#nY?uMa3DKBh-ZWJkc=!W*vbasA9^zma1oVWM9P5_UWHZ|AR z__RW8r(Lhjq+M6EnFNU~0)-gpZ0L>T7xOFYjW{>|AM_bpRI_$(?XF`IJx#pZrn&K+ z-Vc-16XcLx{WdfGs^QYG)!zaSJ8xQmdc^L7wqQSag3Pn4tMBFM7FArTr}ZKPGwB{+ zLE7fV552y%g+cmn7H=`tJp6{azh+7Q#wXLMdZ~KfnGpl?3{J*5GtwzRT^h_je$+q- zSV}khAWeKJnX`nM7KZ)o+Zd%D!^Ex-K^k8jviQ?d+najda9CCdF=WEwIDG@Wggf`( zDMtY6mAtXP>QjMvb^|Z+je6ADEQ;2>CFtNH(6)9ZtB8;t!^bSp14FvfRkOoHB78Hn zv$X%t(5TUd=oiGxZe`}Nvo@l1HEFK^IsG@Ga@2m3lzXT_C6%hl8 zwV(LPleyYZckdxlDTWnm=aM-UF!Az}!Myvuezy{esJlBlB|+JQ-qodjKhnBq@{7mx zC<~mX;8at98q?GW$^wm_Tjhk0^NuLKpgg8sdt-Q;DW+zu<@$bFFF<(@|3ua9Oke$y zN4GVzOtb~}M&|URf_=9M}hp zkl&xluQo=&zh$`sl08D`pWA9!R0(0Lge=aI(T5eZd0Yk0wOsZanl+r?*Cv;DA8l~T 
zojs&0MLER4LhC+Gh6*|@4OgMWI^l=i+oQ_2+;?I~0DGw*hv@EofEKh4IzotCz9b^E zW>P){jT1HbGNX3wTg2*TWH)?*kj&vuK}Kv|wvRZSqg?hTHV})iWtfZicWihEni&{b zHg4TTwY|aG{n)Fl^&FL|;8}NYn=-`yHZ`x+c00~m=es$T*+29(i3y{Gy=Z;}$)p3+ zo{FGmkh+L!==ejJpp#+f_<8JPsMz-UMxKLVJ4Wq-0_q#F$$7{^hW?XF^P}@D zQ|;F8duFe3n+}&L{1)`X-e&8=>hUf-q!0?lr<3z1QBzU7ry|vAyhZL<`{ZdFU?sD? zGr?}Zhz}QY1}&G|q^4}%v)+tke3ysM0hEb(1NoHxyq%SdP_MRr`wa%JD2EnWpoE5* zSIKSsc7HLQkQxpE#9H>G!eG5we28^*j_8#U;yViv+z@b82a2cd4 zDvoA@dKFHk+m3pW>4X}t{59S=#WL0Pqz52_ttV_& z-EM?n244AEgj(MuO-v~cUqaoP-W2eE57fR)OAz}AZR04WrWno zRP`O^n4nTgAgBdG&}uUdUl9S5wBhV&LanOvhcVa!ay@P;+(l#)Kdwl>;tc^UTqq~x zL34Tu;yjk9s;y3SBjF2uSX1@v#WJZ1)&M4(I|?cI=p*d0whqItu+K6y9;3-)y(xXR zDSAJA_vT1&t3zzaMQ>wIv*j<{leo>oOL$Ujw++pnm`?yB7o|IST<=ad=p`pcmv-{D zc4M5`0QE{b2z)iym^p|RJ&THbCuwY2SdY1lWYDrbNpyClF6c6SQJNNAQWNLaa1ERV zei9NgPIp>l-Ps2{qG0!0qP2^g~w{(0tn?4m&xuJM$}!l%yUuD*YGHMXS++b zUs=xA$MUSdG870}+G%NC=Qky@K~9qx*pS z0$ib&91-TlBZENtJz=K+c?ab-D{n3C{;(>{qH=g5Th)eiIL;9W)g0ZuQrOy4JuhqR za6GOdOIU<)0o!Doo+p`+gP>al?;kZds-?di5C)N1=m6H2Kr?tOqe-@5QWTzI?91Pn}o3d3*qoQjISqaO04fKZ|xjqxa#z;e8W zEwIw);z6%Qd;7}m0sy^SZolIR{O%bRY@IxA8?j2+m=j&I@%V{~r5hAzdr-?X>m z=p4FbAJmMVx@wWJsYkB`KdOS#sJdQF%I_L%!#KWKA-q;kpbEksuDOys@Yi*zVG z?n1d0W+*AjEFa7)9USHWP^nWI?ZGF6Ts_qEA17g)mDB>xJrBZ*V_aLir_( z0K$`o%?HC>kd7^XdY3E1x33&Agq$Qi8e+*(yl6azA8ox=wF95M&kluYH9lf0Nj_{ihXa%ug~xx?yI> zd5`1^2`-AZ+}Z=Ezrp|+g@~Z^@2u*{^-j0$#z{Qyh4A$f%EHULTk+y?w(j973G>M@3smIKOx=d!RKt9 z*$L_(d+$zJ@aoW-!{WWG?e@1c<(ve$iNdd<>lgO*fYVWWjMzLl+4L}TYl0c(yH zy%nwf(?~YA%x=i|Xp~X2v-kPrcH`jA);_mo-MQK6B9Wh`^V{BTm40zbi3EBVBGAA(lccNXCh&J>nPypK}LYcG+0FqAtls%Ds@xKW8dG zp{abkAk{mq=%K=R_d^TG*F*(->$YrO3II#gn;O7POkBXts3+FY>1b-cQEC7aTaV0r zhPmfRU5HxWy#5G$&aM|&4L3nxoeUo0Upl_zE(MlK^k>EABlh$}+rR&2z_<{rBjQDV zmY_Q=qNjBE8eELttCv&tgedy8x5ztQk# zGdi5Ahis@ub*al;d~dT~ti44Cp}pss z# z2^gPzn=u79wJ29mJ;K};ZPxXj$Pm$zTpJD|>}l0Hb&2^yhXV+~;ag{e~Z4X;Gu`4J?g)Z6A z%4t5j3jJl}U%j@#qSN_2E}nOz_a@{#* z6$BES_-v$fiS%w3E|HDL z=Vy4(T88^qS30<6WYCQ!Uip`0beYjyEvWf~COBsyn^98MO!%lV4LntaX?e>`A9A=dKR*=g-FsJp!_dqJD%s33u)D|YJBw7WA%v#992@iMg@WB~1^mXjJ;vX(3+q^EDRv30v|v$Rn)k*|vlY`BU(KGI+HnxMqhA z)xv_mesB@zk2Ph<`bty}7;kBAYWDUaV?7=O%&dvJ5anWy?% ziCD7u54v2|UdX+;C@br6Yvns%UZ7z-@-4qfUrz#0&Q)4-yR1u!!sAY$QT0#V2R7K!=;!@Qy&k_IRkKq-)-vwX*QSD2)AFI`R-<+*s62N&WDeI>x*DulIdVxM!jZ zk{iw3yb`RL`GbMi`PzS9)DSTb(p$o12J%Uc?5gh;MMcJfwSrZAMepOJd9QZji z`C}fEJ9f6(tjQHHI@O)J0Iw9dyy7bDBS6fQd;tWh|j(nzB zeDccWCay!gI}$FJfJWwE<%B}5 zpPhWI^|6CDLP79qJ&N2l zr~yu5isvvcZ*SIOdlRhjk<51usFm_eqKzso$w{$l#JRP}?`(;#HTSCaDapLFPhzN% zoCfgamP_^&MtTC;^!}R%W_h0*&L-!EmCK7Mi@-+d8yFA_iHxpM{S{q!stHys@k=2t z#vGw0aobv;K`a!L<)1W>XJ+)yUtis_sQ1GzPh?a)E`I29fZ8cZO!H^Z5<6efmaARB zvyxpOF+9lTDagzG^lfUCOIzS8wDT43JM-K53c3SxD)d|k{`T$4dllZyC(0{QTcyc< zP2Y^s^o?gytPw$_LQP$uUqkzdKoGi&tC8|}HLey1(tZqIo|QocC%JJDZ!+~P2T>%r z&ae#iK<98FL&AVQhghaizZ;eGgW!;nl}np-h*Z=;5q~uy#*hmV#@1b!AegY79;$lj za}W8-nC&IuYNIIVa!p=B&4aD2rT_6;9i!-->pHD#jYjbGlZvXvO-c2GhjJ{&H7A4) z-DsxpNLo3~xYy+80|xD(37Nu;*cJkE0MPXDe4AZ(x6Mj9MRi7_ujZ)_7Q@awcT11j z<%d~)DXm5nW>mQO=S|3$KSDAAd2n@$Cm+gPkO`kD%7M@^Wz zy#?jtjW4&hKPxU#9bumAi~x;7k1!7$uG2BzE2{}$4W3DVCfM$ywYz0Z?&%^emfZWt zu>zAZ&Y=iNf0zD~q*19}_t@a1-ZOfoD6KGmUauZ~)%9p;FEAQc>n=$Lz!O6WKO&)z zeiCXW(bGj|yujx6Ox4T^*k4Wq&;$lBIcJ;UxBL&yOCN(G^Ge5*;a8*st+-MUVgA=})awt9E{4-{Aw+lkbKokg zGXq4_Cn3~1bH+6L%W&^3KZWUuj7xr~Weh7(db9JD=x}eRXh$tE%R%S)XW;=LD#0!V z)_0aIx3@x~So|r{e6X^fgQ41_ovCBGtx^JyVlVq^moilWCT_!p66Mw?n4Ub==)s+{ zV-VgO-)=v5ZtNKHc4^MKP!cq>Zbiene+7?-czG^jFRIa@@zT;rm8J+42!_X6E~9Uz z30;WwkTAo}BF!z=GBvF|4kzh%wxhW%n3Q@&`(^MX14?Gc-b|R~_8EW=TxBRwOeL$Z 
z(E~=-pg*VYVpa>3)w@K6htA^KymND}^k#SFHb+yQ9vMp8`)JA&hNe6wjISejiI|?x zH3;SP3|9B(&Bh#P^eg$iYuE9;S0$WQ9n6!6I;q;*QYr+bWd+OEJs(4=U@>i54o0{< zGm`SU=N|Wya3Pbt=`qhpaPLM6j`D*hjwGOMzAHmp>{AugEpgm9nL-7$54t^f_-FK2 z?|;%oR<|lwA6S8xZglDXASGlWTr@G+kLg^C!L*K?*)J!w@$V?`yzRAFdBmM7V~1i6 zPdY*KzQO#DR|Gdc{A8%-{B7_roCTP`SmF$TQelCsjFPcA3t_nhg&}$DiQpjHX+R*kDLh0!4u;ttl4ox)}68VUu@&J=uX8OcQ2E4rpJ1O z@^)YT(bn~nKSknsLi7|68?EXM-!8I@KRWsWwz!}V@r4fH56Z+(6W^)7vd5DWjaN=- zwocgE!;9Kxcph;I06(9RzB6To>v8mcKeJdKkV#6?hP=2y=)|R&w}t!>{elx0Gu%rG zd*7y^S;ZB!(-vM;;A#U>E6FDGkMs!!IMe%N&xXYP+fIBGky$`%*?8S5=C-=SJh_9X zC{fNh*DKm;-tyAZ?@LJH06&ej^utHNHlPieME;bEnDMCH`qbpQLHkR)IjK;m*vE)% zXkqjt0*w~!xG!0CU{AE&{Fgas%i8P?cNe>ey&CGl=d2lc6WzXX=qxt7tVj#tQ!rk$KIYQi z=p!U8sK!qXUhveM%K9m{jx2?eF%+EI|#-&OXLsc?df_hIV zSelSQg;_LRtM48e2TLSYMj9(WYyK9}7JobCf@)p~jQsz}prNtX?+jY#uMC=aAUHAE z?Q$_5yHKVM`ox2o#DivkWw6AcFl9QZW5EpnyJbExuM}Fz#Azw6lBWMFdv@hgQ1|cj z*=M;^@+E1>z;(hu{lBN8A=A|N(q4lqB}p@VTn`cy9hX#&C3$;hPVZ9|YII3!fU=YQ zpl-pO^)eBbBB!rj?KxeSp%6+&LEN3JKhOearh(GEj;K4Z?ffotFMhH{gx8M z&{=^HS=FrFkbfoMB`ERM-SSOLSZ<+x2(edvhQ{bH7Oyi?byz@F@p% zHD0Jh=W}%9(W)!HdAR#vhg1Z~X>~Ymame{K6@s#RY=0)CwD@zFofYoti69fKf9Kqa zVAW0Y7XoknjOhejWRUVv*THV@gh4f!=(x4W>S2u?-mmQ(+?gqV(eo|MKP?zS~7J^P<1AFMl zPwFYQygGG_WQ@T7WH9KVv6697<;g=-oL{NQ=jdI~p_NcQ>8o3bG>)2Y@1ZfW^~R?B z^F7WZWJ58C5VGfLgS(Vu*Uj|RNM9Q5O3t2#^@eV1BX)Qe5o9=gDdHt$JyN9gm?Iz| zqcd_fi8u+=BmwDrZ|no{Aj$#cqn(K<^O;xLEjxos!RC^0xDqXXHsbZZ9$Fvv?7kk8 zF1x|--I`{cDBnaqnkLMm7sU4;$frMzQEKdoP2)%K%xi36#H%OmpOooyxTSTDm6ql! zf+Jp{-Wywc=uI__S+yfAp3H=*$>rq3YZpVdX|~>?-7Wep$^Yn}qpbFzZ!d}6OB*w( z0i!d3*;}S^V5O_8<$rJ*cT-3EKfS4A`QRTR$Dxw1B_T&F^X9+ZE9xcWpR5KUVkA*< zYDM3+2#zvggR#CVMf`O*8CwE{60ytX?x1nY?|bt0f?j<2fiYbe&qL!MBMyW=MoRQw z&1_RkjtDL09OA_(8AtDv{W`qXi)|QZAjZkI;(|Zgyo){<#<^IP*rhl7b&yN_n)Tl= zHV0w805@Y72WyZz$KJTqSpg88^7Fh-`ITSh|4R9FZMks%r&HDUms6#*PH2t8N?`Gk` z9`$FQ9kOAlkZ&_LU6)CN)W3bDMP4w40vnvVhg?ZBCAYBk9tH6RH(t6QQ)5E2vXQiM zrtPKd(;jY`;ia{40JV8WZfH{6D#L9sL*m9y=9+|2PbLK{F$>UF1NfFJ2O&>yR-^wA z!Y?wvNrcHRYCEkwbc|YbHPtgmJgZGQqAdX?>XkN3vWs6`m#Si#d08(!W znlW7#3Yf<{>Dfvy3NAJM?LCDQ9?8u^?1%7;Z_ZbL2*WbFKiL!v`f)9reZuS(#Mo&&k0=RWvT-Ct}Fl{wvKPsfFNA{op|Nx7ov5_)bRkFFdh`}kRY}ne--_2N&~p2l>e8~`1}9W zfgoi=9f0mg4C24t_%_190WXKZl&TNxd2vTyLBiu0VbPkR`CT}!Tqazv;ACM)DCo`u za-8hdLZqBO|G*;8t6)R3X~1kn5Hi)ZN$+l(qeMK%gx+28!gfL0vi1~ES#{HGX9k;^ z6B-9jx$sTB9cFX`hS(|9Q#hvdi7sfR&Hwba(yH9)U7B*MNmY=rf#jt9I`O;Mv`R4- zYW&%L%);PD&g5Hl5kEo8_v^3x@h8y@e!p7zj^N(7Ng(%k-BHDy!VC>oK?M1kxcP#7 z+(Wy^Z_pNB@ySHH$J@F@{)C((%-=*Nq-E`VFIsqxAa0cNn3vbQ#?XPXjy5p%Pyl>F z!|db?hL|__t>0!-9N?GIf2m|myf1jZW)-N<<5EldV|ZtR z-yV*qvHrD(Gb#1bW*6HdeLVH$UUPt_)^G&jGC_uW1JTo5IuReBK|*cOQhCDMS{Niy z{>N~w$`)OOr)$U9++RV;hqy1SV4~H*x}M0{w5c}+!&qE$B8%{RL@bfq^mWWo5;t7?h9wg2GrMQ&}cW90z*0_L^;-3AH0NsbOltMy`6yNlo`RuN9+KN|~jesU-b z8VL%X!&EBi@h1BZA4jMVkz?tK^41YT5S>f~iR5BmCfccybG+>8^>zr$DsX;>Re;jVsSEA}^Hj3d51suxoAf>@L*U zL|QRwWCfg+f_5nh+P!hq4Tia3eEx@z-mi=9R!R?=@cv2D%>Qw%`@m0 zcb`sdN1Yis*;-5E;1F0Hf)o%s3w;VSI@jyRs5vXwkP10Dx;Ll)Z!5^gU2Evr;-Yz(X{ z5^O}3?d#!@bwm8Ep0~&B#jIuaovOjPLJ%Ibxpv1XseH52_!eo+Q zpNw*?L`I}}l|qw&bAC(_{u_L_ud`N#)5CvEKjw5uiR%p0Se2h5#XPlku~oVpbr79z z`+XUhF#~VvQWl&FV=%#Qo_HSje=qb=%))}*)p+4Vi+gk{<=xH3Qi<_gS&pLq-dQ&) zQdivl(KlFU;JJh}^rZcpz&-m#;F2TT^`7mVjVE;O_4>%wcfz1u$h`%))b-U#8etL< z-_~x0Z??slR~dbCNW=4mo;<=xowt0nQcifL1?(B_CH#Duwto;na*8tH*vVC}jN~Uo zo(ZX*w$1N!7Wq&yUnUB|)}!-}fabKAFh1Q~J4DyKp2P^(Ok%Gx$$m)^E(DYCTd&i3 zpc?{9kXsX*=(CbQGzsLndOoa(jZ;&voBTW6eQogl}sH1nWY0b5~D#*IVlrT}NR z9nfmrfaoJPp?vp;yPl#d~mq+QC1KYDHq zM${PlIecE1XNbN06l(5Qgx&5g`~A`J!QivD2bdBI(fOC3ezp5X;Lx4F8W{%4P>tfR 
z{C~8abzGC}8}=V8T0oGLjSx_Yp`_Fl5DWwiKw284k*=wfNIxJU%7kMIKGF&P3Elf#JUG^0x9nXA3}!}o;5V6 zJc!;m812}k|8mGHidCj4K)PZi`(bVXu|nwqnSbrFs_|E8zrUXBke%+u<#dbhD|6yY zRnfY)FMy-p6jey&OICH@^6naKYRN`1QRLR7@`qGb6f?6f8Fe0}iAF7JEo{V;k~#l%utfFE&dXLGvu z)RJo6pj{n;}a~dof!7!7~hdP^D)^R^TGyPR~Ke~nFC(>nl(=mC+kvc zy_0NKj8sXucEUw~o_ug8AR{+{XR~*jcr!{bG;2;_$KwWrDQzWXlc0d*e4C2z1E4J= zS%#KOT{Q0JdrY@&bt41s4>V7zhCP~wnU&RdZhTLY4a8_xKIJ`A&!^=O%=2&h`#XG6 z%|bgZ0hgT2XbNYQMQw7On;Tfd)1@tz;I^QT#9NWcM_j&s=9AUudynh+exp3hz#C)R z2!9m8gmV^$;ogYxCOp|fBlhNZnKDSvin1kV(zYOMi~np1#=mm7$-S6ye|cF3nhO+S zNMO3QaI`_jHNIJLX6}YkAvb$N8%t@WQG1t0C=x=Z!^=zv&XCEovPFGdvv@sT5|>+NJLXTi#_c;r-r>e#;s*Y-a6m6EV-3ihID78L3&^|OSZo7t+|FwC3}#UCm^kjhojKA zQ5_6_!1PuaP#`fPp{61al~$NvVjMAM#?P1FdMN{r<%K;eAG~Uc!XDR!nNJCn`+CY4 zKW4{&1LLcaH<|Ag1!1%SIu!Xhp4}vQJ)TzKN7zi2j@yF0y!HnqZ|nZOeS4$iNBexT z{ZZYw_hL=_RHn&ixKv(?nzx9ebb^sY+MddtPVv1lg3$sLN#sYc)_M#^}a+1^zXx7w{NZka@snN-{HOcDO({`kz|Z%qu@AP;kj(BH3+nm(@?yq(3yBle+IRO3`HY z#=9y!OKH~Yq=OZoJ}|0uamq|itSLpxjubrGM*aBXW@Z~>KpT5M?TF zH$2F3&p%Y=#h090^nMcjnw)V5bAf}ewtxY_IuC6vOy)G4geH}(yA&o&o^wQ54i`Lt zCYevQLWDh67#I`6LWhIw2I5gTK*hR3Qf@b{PW341+|~EjusTj&oYh@`f&oy#s=Gi} z&-lsIVyU&tGnd}XQ2%%f+^9OO)Mh!bivd5l{kyeE%DScZ%LZMlkrZLTN52l8*7m=2 zFBKj}TJG!8*>Wxe=b;8~i~4r0%}QtCm?wJh6e7quo>Ko{lO}wNB)>jhaymymor=A9 zbmJ|4lxA#RkSRL$X+wlnihYyWK(Q(g!Pxt5+xDdM!-M&z(##}<#vIfT)f?B|vht_I z*0ED^W3AX(rxadFw?ReB!O<-Lq=7hjUXsR8i+;jVNLZ3=FxcZC<$~ykx#vM&5N1q0 z&xGp2fbR%g*%9-64$s?H-b6O|FVxNq0ruOXS{l30W^7$1{&?(Uxx*x9)4V0bab<21 z+FFn--I{>UAMLY%nKilbQVoU{wlOlNGGCR>Oloy>AtcnVqlW#)a{f%0+VA;qxSrInt=}crM2gbYD z)lhqX`p|HrjX*=%;RlToXh^pFvPIes(qoSY0#OPqm-b-_k`{Fjlz9C6^*>`-=aH>Z zLgjZIL+nTT$M_-rBY?!{FRIwC!3+oT+BKX*@PM+smLuDnl#9jaw!F=LYReu-0u^L` ziYq!0RfVk!oVKPxm2bDdhEeafZ`u`K!l3qg#6j6A5H@Rl;YvdxbZ?Mm^MIuJ+B!&k zli0$`$I&I73S1piL>!S>s@;-J4E}eM_o(&WEF z=qym>pfc`daOwK;>|2j^I|7Aijl8Or)f0OL_-+*@ODiYiH$}CtPSNW_bK1)gYn|F| zQ$v2E(_u^5LSaiHWppZ3h2LROjIR{IW{PvS@v06I!a2249fE^6Dt&gI?Lp z)Q)_;R^KIfe1!aWrDU>uiSM3wmBn2Rqw?C;;AYZGG+rJh@;KQuQgh7ZjStR>e(;nJo1jMTRc^Rsn$x8a#>exZp*p8yG!MAL*F>oP?=SH_&0+izkSMhneg4<3 z+>8ubPVqywvLEyy_$;XyPxnAWLy6>r_=aBx;Hc}~UqEVu0cOd zUZqNY&$}BVW01}6DA-Mz+_yW4+X$@L)=SiI)HmGhd#rYIc_tXJpxHPF=(ojT>pE65 z4Q7=G)yEEBpxwmjCWGnSi`*w|z8Sfjys&g@MY+<@ZM8*l#gL1)Y+z za8$wkEugRO_W#k(ThtnsE`QiBf}Y9K(zeHsT>QyW?WCf%KIiC3D_cMD>5mEdIz;t= z5rRsO-RFHMur2>O{C4YmGz~-BT1*eW?nuAL3znTNiMqW(TN~Q1($4C{dB zZEmMh<39w;hsUq1n4JZYv^G|@2dt=4ntp#m-kIWY$O4))hj4GE$2dDY*qb;NEm)%;CzDra=B>TaJoFomI>45u;dahWSdZ1 zRA&)hbDV?Q1PUsX2}q*#7Og9)zWzDFZiZ1NETLkaS8=K48XTo2<*djie)iNC3nxZY z1snq##~-DF@rp=mZeHG4Lf&-OIfZb`k|-z0N)vz8^lm?^KK7?pc4|Uy zbI(W1AhPeHWt^<~Q|CKUnMU<4@i#f}z{1=rMpYW&8|tZ21Z zu6+2IsB*y1KEWEy{f2CD-q+^P(t7fP;gPa}l5du|tc=;m=!QIe8kp#zbl`&VFNksS zam<~{MezIMHru&R*TK%Y`{UKWk-7D=&lTw)!*A|T zNy1T`i`3hp`9JVp2p0Tomr*lV!pGA#(xPN*xY?=##}YSZ$wQNQ>EVRZg^1n5f$Q8al($;d+d&O#!T zGw{kNMph?OSfFXPo+ceMACOFodb+5Udz+y`2hi$b{4Ew06r(p#l!Ki|DJ!?l2tqHlxXylcV53m;sKW=(7*;NTzN%xY40_E7|*r z%Jlfu*H$EX9_fhb3eAMcFZbv`0xU!rW_J=!{tLD$fIlO%{Ud<}J9w>YAC6YQPy)?0V;$#|v&r&c935>YqAX^+3Nw{M@Y;ID&Z^E*8Z((tXNL}>=xU2({n?v-)eNFw_LP@)QZ+SI3p=H|^{ zczYdar@i&Xvd;O5GV;>G#cxq?lq0sK{Dxhw*BL*XeYYmNdNFy~YrRWx$hCDbt|-XW zkD^00da9PuIbOc)*z6rkQ!hk(m7iqGju>Tx!Mn{^JPCeSsMZdb{rSk8kS??o?!j|1 zO{M}bFkK7dEe@ELi$M$G0NYdPq$}tPgN;OIMRp=S@KlOc*YN2&074mFkOxR$`gQ$A z7UIuqGg#-eTR+P4RRHP7?9Z@Gw{CvZf?7{`8$%uA`}qhTg(frEvNR@k`?Q?cs5c4! 
zu$t=~ClYi=rS6mw|Gg%PK?0|CQMvH(;4R$CI2#e$`BhLKqvW6uUQ;zpEW?ux`2^xO zu&L~CV;xt`5XcUDGg5Su0_^<+UmLQqcv-VD;I8fz4iW%)<%bIh%rHe=jcw}(4+B%l zo>t&oezDs~)rDSLI)U30%QsY3?QQPpwDJ6aEQ>%&+kaXk9xKT3M;BCeF3O!}Ms&6d z*1Q>iJ?{`00Fk;4y7JiHB>l&19pGHVZzpf?=I=eQhJy@yW=5?t=MJwl$j%u@qJH{W1eHzfzuKY}`vlborNTZG$0U$t;% zz^Um3zV*a90oSf`krc7k7g+z~*x2UrzHbcILJ3UhvHW?wY>Q3yoG2?!X%U2tj zSLkZ2Q-SWUbilJtU3ZkJLOuFPL(dmQNwPRSsiHG$n z0yTC*J`JKQ?ECCAXa^rUe5GjJ#_8gVVz0oy=;y7Kpq*^mUi1}rMvWA+0+&X-i%of9 zo9J*rsnq0ooKq_0@~VSWn@u&0f7*YdLMBr~Lt?j3*&(>oSM*1dr|%iO&!dY-nqUBY zl23p7B;y0*72y$SUM7QpCb-1z^($t%w2vA?fV_@=khgQFAWHSe>&$xJfzzb;N70Tyn zNGxYld>-%uLAft9r^|q9X*zo+wtfnFTd5Id-(26lF(oT#Xaxpy>myb6`{=+Gy&u6N zH=_hwlYE?9FMc_0y)LosG1R_Lh(s#;OpFrTduj1=vf+@0#JN<&uI_ zJ|)(h8$tlhStNj0wGid1n1EKgqC26xGxkv_Do@H9xY39IVSGKh`E}tv=5?`ZW z4!swRX;a8dmd0dx`)-?*^ojj5Ymk@UW?C3WD8u`17V1pt+0sdHvLxa$q=WN1p2M>4u31s@1Vz5dib+@em1*HSJLRFfV;>3t4INi zE$2hMhh5snUg$>nZR2~s3({^>f?4-Rbm!SOt>0GcC3J|!+@?Wbcezqjq=O@SVIA@9 zsWY8>|6Xq@f1t)=p;eg{bP!AC3UB6%_lj&*`#$UA8UOy7%8I(zeTWQ`E3k6bnvgCs!?3+nc(=AKfpt3wz&l}gKvxa zn|1e@eu*Ld4|{SV-CIg#tR-LfstJVQz2;uP#zVedqrbBxXDH?$8>SgN*pmT=jDC27 z+tpxWCHwYJMuO0L$j0@@`FOyIz&_RsT%RHn>MkR|9-I*~>VX#-yFN-2ss5}k`GF(D z@!3YdcF|w6A_xbDs?{4sG6oBBLd*ZYd%jh6EKH_lL@kc`2vgu+jgVOb|2ar8%f>&z zx_X)$oj#f_8U2iLnr49UVF`5%xk}Jd^MU0s$+YSh%i+!i;Nf~mKcn#AvlI)QS6>&) z^mi7d6;Pp~+J^RS|3`yP--XrM#n|#l3lSZXH-yn55zqTabmUYkj^ncC9=i08&b1Y% z`M$Y$O9K?NSD?G5iA|aFYc+ZIIv@Ssl({!rgnQy|U%t=3tx6BTA!{9E#v1w`M7B>9 z12-o>Lo(=-F2mOW&j6AFBEj^{B9idVJ<9nK9ww;I?e6rZDtW(dk)3pGQ6q!%c z$F`;$#bnL%^r1~?){)-vAEjK54D*GAvqFDeS~|n4hzq{VJ+^CW4CI4vybxkr>v=`H z51z5*H(|JBsyoF@tD)-yKT+H)j1n~I&f>ot!5}~Nj2#3_KZsSr7B58+Pl~;Hd#R;i zfwXd&)kWo#J~U|sE=fswk5B7lzd+0ZkFi?@6(;@EeU+J`U=}I5W(H_Tr~hp)yZrmr zJghZM%xOW6Mq9)qK!}b+3CYLN6>h4pfqtTpg~hhmUg+}}trxJy$))8F-tLgIz37!9 zS87PxkdLd#R75=C(vDtcK@9E#Ht4M@jP_KHJD5k+e4O8=QHp=!6qowDk^{^9b82Y1 zQdR~Ztg~|fXrwIi!F3=IAkzxyu|NpBNumF3;COuFSnFzrV7KOr>ncIt=MMqOUq9{w zlzr{1W#a|C4k7{`(-!Jq-grpjz(wBO#VBE2?yw*fHAm$+^HH|OYlWJyex%7B@+F>@ zhN!k|o>35Dpf}S~XY)J`*TyhILr#!4LKv-h6`zGQY=688H~)bXx5kh7kILWNh@FyF=c?9BNQ%jH-lCWnL{m}%KJ=ge1-2mus=I;s8(3Srx7E`>OH&RuWsZRE= z3d_42j3IS=&j5FKIFqEqXmYlpSR{vJ4fDu1TQDJYf0t|is+A7|u;Rk3JXX=u7Y{?D zzMcNS5ZMvjH_e?Q9^2DHP*?g=SE7J9$4|y!{0+P1Z>&$hhhIOI)@@+r<8K3i->+~p zFsn(MeMeRbS_O$ZHYuX7;*P-_W?QTEs-~7acFL6D9{l~oaqVSVza>dg73 z0@|!1+U;1~jC9x4lgoXVKfNFY_9(+_8yThgYZMmTs;rg?tv>oatI|6QsVsE>x=$;B zO@rUAGwkgwf`W*9?YrE}UbhndO3m<1_Q^OR(HWZk0?PDlv!Z`$8y_8ZlBO`Y9pL9f zLGYY;UM(s&&YfivJyBtUuCtCpGB?DldhwFv-9;CrGFwH-aeqbwFae&SzJQqyF2^ws z=31yKFEQr(^AX#5*U*0b6l}Gu*l@Xj>9nYflAt$0Uluxfyp%*l(x;=9_xgIrM-RY? 
z)bg)^dzI&j3oZRutN6vBN}LWrk5mkgS`Hboor>d8AB3QAxPeFB+&9|v7@WghujRXmhkQF>v-_2FN@1J0P4 zN%lg5Hl5wr5_sqi)jFe>5uK0DA7mUY^201i9tUgFFPx7mPUiAeVn3H>-f*#1Y;RAc zfUUr_2Q_`?5!931mURiAi2~_I!l^d*yK8-R(ueIelEG}d3!CCeUOT~oUoQ9>7hEwR z89cP`TdWU9$^D)}pj*|>Kjhu&4Cw<<%%)SVpP}Sj46%d!{_&?~^*DZg>%WUp7zCln zC&$ zV-Jo=?Ijk7dW!hAOZ(9?SNxAg?JcyALLfQjWJ| zRb^g(0iYMwVn+c@#yxi7R$$l7(tf5Fo^0UW)NV}$;8YI|6rNnb`+N4{;i$V%32Uw7 z1`CIyqz$s$-zLfII&*0BeR$#8>9NO;UGLEwsSD5}YIY4{zE;+R!~GYfK}`yvXC6oj zjP^d$KGHp{D_Q?7Brbt%IEUx8^c3Xov~@iGNKcYZ#)|G9-4!?JVV$!kCR zDI|EZ(s0#HJECa&{|Eqj;PIqCJrR1`2DdlZ*qWf`#Rm}zb(y)>tHgo8oBeLlBO_ME z;n6A8M}lpBjj<-jpy>8_zcGn&96iVBXNs#|I+ynhht0 zu3lH_k2t6icpgoHY0UKiZo}?oq>Ta^Wxs|kgtRyMl@|Y-#+a!2L?Lf41L*U922lTD z2GDFfKdC}rJl7``@(-A!9z5_AYesz-N%rU2kt3d;7Mz|*0fdRe;sAh8feG*_oo|W2 za61pdZVQ`0DaKa8UKFJ)j`!?#RL_A z!HAD~({+WMLE8T1D#dCggw{%>)Si<;0}nVE21G%Nr6F_D(QD*2Bee!fdCc>xJ*Z*y z?iLmP(Ha=zfkt2Yu1Me8M4M1Eoazx}y!h|gfrR^SSA27_(ON&v0swWH*uBbOeD8m@ zSQ8o;oPB|>qhyU|7T#o?<{qJ!$-vlsJw8ajk6P{-d}^hXMUeOd&sGTmmvn(tj2phd zpED!68#yCO^1p3PF#0W+I2`0RNZ>iee|9RUCU>lO`@9L=1oliIn~dq#d`MA$QByO) zo3?utr1JCCP+m@&F;l*cUuUmdQW`;fi4OnGD*Csz#VijB!YJZ>@k) z!Y=565tznNfRYpvoBwz!9AfyIqj6%Bx1dk1sBE5)J3*ZbI_D_aaA*fU%EC!{wPp6y zz|Bi14@QUvGz+l@UfJ39y3Jo=5y;lyr7P9SRi6&B5zzf%|Msxg|0EJMxD#3jsqAOseTRQWuZGIf6U=0}HQz1d{(xXSAyQ3emECNo(9!|Os%XC4@NNo^J; zl?Mzv4by)}#kucan0j{rrFgc9EwgT)tjZZ;(0VXgGntD1Yh2K#ny<}91z%UlCs#L7 zS&aq1zY%*3K%V9%zn^^L$yKsI!5SnSZ`Rr0Pm$h#p;$kOZzCu5ji-gj>wEt{79^^Y z`0gHkXy~t3|NNfnR*O>1e<86R>A`Hi5kKK^Ou^te;@WNYgh-y`AJ z1%a#SIOSZ?DZ=*N!}W7}SKHm*w-F8f7kO;2yt#qx&GopkH{@c(4Qb=(UjZo@lM^k) zCL?>2UgF4t_sHMg(%T#7Pa#|?2%JvqVRq)jBk7d)e*y*o$0wi`?N=3eplbPgMD{&S zSp=+Q7H?APwvG2Us_UeFcoz?_F8Ah5(O+g_kjK`)(8yihvw6Z%w{IRTKM&^6-e8;3 z>K-0iC*bNyG^ywiiBZlPL;(AzduiMWa=48G+q~7<^!oW3KhqR4_`tGXyTops5WD-; zT4HH_xlDduAW`<$=)opJ^dJbq1tiGs>L&|Xp1XDlYK-c$>s8&V%l-OCIb6%+!MFX>EV;;j4@4kHEHTU^@OzuRXO!3Tr{LH6vm z0s;V~7?|s4oprPXN6ir?H>{lKD-+W1G^Zwh^*`1NR4x5+#T)nwXgc2ujLqj#d9@G)6 z{C8#Y;*GbaadRp3f22oK1s;lNM|8c?<4|7}aIUDMCp zMxpjrzuBj+OS@J)u8Qjti;x-mYn#Az0Ca!tWud?BU3z-v4==sCC^_g$ zC;3$2gJ*$!4CE&~V^qS`w@I~6=EV4z+)hc|x;QUDv9`BxCB1bEeEF zcKRqMYHK9x)>N$iGt(4i1JT;PMwGs-NF5d7Mf2`V>G1@cNh~y~xq3^SLa9^V6m_3+ z5K%bwL`k44V|TNfeN8p^*JPnVZ;KGsU~I?+IriLuiF9l9FfLQdoA9Mg2JWc@`k>9r z^7PRGgZ)6TlF1TANadkfsDAa8;{KzmvwHO;m{6+`h3qS%wUVh znc1J~AZX8LeH1+l-Hl%!2spB@6PoI23tS()rV(PWtxR(v-~mv_Gx>L}Gl)yoWaB%n3|29X@MOTKefCFK`DRghW{=H^)HC%P!^t0#bUe7PMA9?wMW*JE9l|3?2 z8z7M5V1xUcp=S_SCxw76@e!LX`+YEbd&&69sMQ9goZa=a98H*yvJ3$1<1ge1$46VK zjDHOU(k%Z^C=mE>p+M8s>on0c&xNono0B=Z{aAUu_(!o~)-ZqsPMGra@tt$bj@)kn z=QZ!kF=8pTEjEs@r{@Bj+sUv!Y&n3paM#;6S%8laSPPbQkKcHF0nl;4R(@zP`G$*k zNY^q32+7fHMrRF*_Z%f38(k0EyxvMK7bSeE-&oz?z5dsLwL4<%{j?TdJ0F@4Ru$KnUNt0n*d5VHO)NjutGNK$6WQ9nno@ z9SPQF=)EnB`6nxvM-LNRvJezRQ~#pio{ zUU>fq99u|uo8F~9(>W~9Cs0*sB+}$w=aw#lZ8L3QihFO9NfmgTfWx40aR>pHZE`uS zcb`?(Gk>WYn-iTOa*`mS$e5*n%`le6JFEnkJoQVIk5>79luvh0n>_~2Cs)KD+7DT7 zBsMQrZO-L0JNbx`K#ukv{QR*TJI@LQR=j%Ct8kCR>fQWX3aTbmy8Rg41*=P^sEmj1 zX$2$!`=kV!u5+W`!f`-lza~ntvI`7ozg9Kf&Xwyq^a#Nq&akVbAildYa^hhB<%jL3 z0-r4c8P3-M8w6@lhDdy{=s7M3Lj$6D>-52iIoT#PHmsHtcM!U}|Geb*=*XsJK za|Jz(F9}!R`MyCLGW8M#G&Xt#3&o$kax7=5mXSZUEuoeL3lIHsH|=^U$%L3|9VRYa zS6~LGuw*J;*uZ}e7n~dT5};0KLN^t;h6}XqT$Lak+6a<#VBhZHT{|$?ZyQF_G_N)> z{E_cRJ6nu(Rra&-n4k2fo1as>U&A~**WjnIhH4_YM47Gai9t#3tqae9b;01*93P*m z|BSqyrXmhfbJn}9n$DYjKD5WzuGtYmzuivv>4Z16?)WVn71cYR7AI?+Ys0j|J4#Sg zdljqR4uN;}@Qzl?Y8)Vs4_eZB2{i|4JxvTVAHFz=)9&Jw?zw>H}u%G2>-q~+RSJ(WHv;59E+Fm*^ayN&p z1bO~E3}zoYg<=e3_!qJ^Vq(euOn0;C4GVgckQC}lhY+J#OnF4nss!IqxR_Gf%YENi zB=b}Ls;w4t%i}W}pQQDHT@RX74kX~63vL4H7>1Z5*>Kuz 
znT$MY6Q}zX*aRZbr#|i^_8;Jj`lkD@MY{&rkv>$)mG!8XWb+&UkO040x*d227ryQgX0}v z8+hY#ne!7>eBy1Sd)|7z&8!vlN+WqG#|MOgQ&e4>h1|_b4Di`z=||2`^vie>XTAb- zv`8sx`*svk4l#W{Ep_KAH4R(bmbGnHUZ&~pkJ}`>;!YjiAR97G)~?4fj`gT zR&7=Mo4vM`=Jy1@nqI(;@!tTyZXYz+RrxL%`P0%*Z=y-IcJOZOPy1C{+MsFI0^`hT zNbK^mHr(?efB`!W=eD!*O<>*|2v!$ShN#1MdfM(_*XE&rj-%6}+rsh&#gsgXpt+xi zFF0BvZxsK>HC!2RCTD#1vV;twvUqKmplLOl%{K6%97Oj@rm%byFC@v|;x)@8=ujxsNKq>VYf1I*{@7;Zf_7;<{X_bC5t?N zz5SP7MLEm)5s*dAdEr#ZiqTr;sIhA1qw#0=;{(b5mq}uSN{W0_-xwy@3O@ABh1kw;Gh1|&aVIDrmI>*nj1*>RcGC+G-Q8G2kt#c2j=N*%h_P+8&gs=R-UgBU<&FqiO4e(zB0mJQxCL2Td)O$EWP0Rn)`?ap)M^Oe{;B4d=(2ub*l z*~$A;p?^-g1xRb+yy^8NAndmqTcG3O3&G7J+D6fxO}(vVe)CPm;A@f?8nv9-kt7{g zHDp;ZTcmTo1}w)RXIqjLF~3_AEzf3pggflo$_Pz#Sq#@+MRW$LLG?Kk57{;Y0%*Gkb z`2!r8fXIZr%5{@ss!RH60JkhI*;z+KEUTzNka&%O*X2Ipz9w#w4=P%Hz6)5a`i%!K zZEBp5VSJ|3AS)u`udEGd|8&c-qdtTK4hgWmar_ZuZ1`KvW4s(`)wTP}BX_2Tj5mq5 zo&i~e77j71W-EKF&qB$dj}EduO$t(k2b~)9zwUh75c*(1pClLn?TsG95)4QozcCzy z7EiWi3jNY%IDs3#?SXR1>q-W?3Vn@}$su-F4i;h{r>j~rgV{&u-c;Egis^-x$48ca zZG!{+O-?{m@+y>8v}u(w5MsPB9t(OeJ9nbY9AJQCWkESfE5I81SF^OL2{Fc1g`Ad@ zF;)A;PyF1omOqg;PUx3dQa2UmA4S?$0Ne3TAZK|_zd->iU>%GS-NsezH7^o*ocsBD zF`APq_*mv0=)1O$JDFodT06aIpsaH5h!#LeM*qb%bu>%IZ!(QD$2wBi)ph6Epy%Qm zib2N>%MniwTxQj%QJs*w-Q{FA_>A zx$vL4ev#upyY!JKGpptGSuY*+b>JT3@E4{{4+T;PzVtc~IG#e;ytEa%u>{GbsS&HE zvH5nhYzGWv&?J=s#C@0Jb_T`fS&KKiCm1BL*V2BeiHItd7S0A1Q04KSJ{>l6RYtPz z>d>JRveM(9TmU$l&&M=h3-QhgF8HxDbZBu>rVTwqnh>(%`el zicZ|T6K{M5+EownT-5ZWb<32){ogMZ?}R&TcQ1K&lB+Ko!;)jXNdrcN4wV&wWm^a+x7MD+cY+t z$vk%)UZ2G0Y@Wwu@4#LQsG957@{=##a$ka_E%b&U8HyZA&?{u0RK&owoFFkFmSPJ@ z^f39Nk<=EId~BC~TgO_E6)PR0==t7IMT^qxY|`didouFHcsobf5rWYv8gD1zxZ<^T z_b;`fKYA-t;5T-QZneWmLt;Llc)M^yWV7^wo)}c+jJqItO_`6V0NY(1R{iR?I!3w_ zzoZA$I2u;VZa5DM9t)7?0BSZH^F)yO6+)=O&rsH%qnO{wj1m-n{) zoVEVYqZtSRmfWy)ycuHty2JeS1imdEK$jFHbZ0?F|J%YUzy##M8*n(Da{Nt883UV7 zC#G{cNMb;>Yu?9))JP|9DJ-+t+@9&_yI*%B6RRQVeL#u92K<#0v(NwhY&SO{Fy^W8 zv@zh_6u%&qE^iIk77%T$Guw=X!uEp`fAkBfY6v*&1X+Qx!t3q0yuJ-^&`N+aUXze< zSS23X2K_OKEHH<5wClEq_Hn}ClB1%yHlcxGo>q>k5KBlzd&~75)##ODcnR!3$zPi~ zUJgEoL8dFMWQz@$YEAl(-Vq;tfR$yxtOfY5|C;ZJxcMWuf};1b{Ui^v{jg<4J0C}ko0G?O1s*0|hChyO`xV`C)tj?bt34PM zUEEh;*qD3intrY~2BP~ofapF4^m?X-TDTRSp*Y%)Z5k z7g9bR;|#pkJxmDlzDR+B-8-okv@+~yQKxONLD>%z#o#y-poB<5e=Y}1*hiLs3R|It z4{Y}DfhXyfXZ?iCu``&_0y0opT3)rqZh)bW68 zTVXz4!vy*-?8u&-@+Uqq`F3=J!S3EbvAKg*Z&_&iy$ih8> zu9s$ha@%mQmfZj_rTxgxhi)CY)%xO%KLf>DFGyw+7A(Mb%8Dx|DlVgsz_P zWNo~Puz=w!uv2)~V{Vd4{#>RH9nP{?46FvTFud1e7)9KF0L9r%4n?|cy4!-CHJf2v zzH7r%@c4D`Q0)LrodJ#VCBCM`FymtH(&wpu=!@;>207ZnMuWv zg*e#eiEjZ!kZu;D>e86GYkjj^t1$~AQD7@9k(>4V1zXX$UQUMzP#QOhY%y?OeT z5Zqp+k0o7S>_@8y{k)=c-?M8x0V1$MbS23WfH$qvfX>b~S*e*u2cI6BufBD zUz`uw56xS5W-=ASJHIfzU&mCp)&x!Ul01&TB1l^i@LnfD(5>@GWI`as_ENZU2R$$Q zP?`K^uY#(1E3g?r!_wRUkL=`pUEor-Mv7P#?N~I}JLPJ{iUhFw-O%+pj!?_1^ic5)I>G7UbHwfv&e^m zTdd7yvqBC5u7ff(Pb+bCkMm*6X9Bn*wr_Yw{FRf6vD0d81J!E0$hs(SChqe)MM<4p zmNZoB2r7?a?B)iW%=|w|CljhxA`y}$HeZT|^g|<1f!Bjh%0~&G5W~6A>$4cPY_Ba3 z)0^z&?Dg4Q+}Y;oMbFxz*-Vm_*th_yQ^AV;C$EI7m@p$27d%ZX7zngDcfDs33PcXl zB+<}Fi4_w4&Rgi!hvYcL8$4t7+r)0zJ4q{1ytY14WoO#* zSkU^n5b(>N+UBZg4oeA=-@+nF;e2ewA`qEbK-bLBxacKRS}zE~*ewtJBeRlChuJ0y zgQNgy9Dz-T1P&8*fWSd1>Nc2s@8^j1+g$b}J)7L6XBU3OiU%yM<%DR2TBNGR&*_+-woKmBK$W(C0?3^Cr~Ha|np0h}uvWIi z1ptzD&X3!=??SJ>B-Nq9dDgb4oMX5C>$cUt4r11GsH+2lEV4rPV&&bXS^Ko3!uqm? 
zsSPZL2mX_#7b)q1sxq|CeD_bXOw?6$79+%li@oDV?>r+RVQ&YY=aQ^)#|i^l!Y6{r z>By*4{vo|PCdl?o*7}X%6@@nv=T0cuQJf=ras86j(W~EzPm8^hle$*jPJE-M zY|?)+$s=iHKy^rCIIWattr}Fx#))J|zL2WnqOzf73G@KcO5^J6LI#WxoW6=D3aMmk z{J?9QBof!cDCiowW;9~!lriGZw!^$K3-FA!Va!|-dbgjBmw%!tjAp;P5%jiev#O1= z+q>xwEQoTjQi|o29eXAbUUi)sulY3?Zf)_6ZbCU+h(2P z&L}-U4^-sr6k(2ecHrYd#Zdv`b%+GfW^;c#!={->v@_5wgM#&BFcB;AIUt!5@kdVZ ziGQT)>nMco40Ad1=zc+!$k1tTP4lzeemXGqrV@-B z?Vbr1vvq!RFEKpAcTN|fzdKz1A?A&Qc~;eb4tGaTg`sZJAYR~!!alCszntgs-Er+% z-ppeT)gP+^=3SI%<%}$a`2$O^w3#iGle}7G66SXkdwOLB=I>wT zmtf)v=FU7WL@O3PVMuIM3uP+H4jb)^r{(T~tb|(>LcF(1;WG_aez@lqu8`Hj5gebr zH3J*I+?A4I2@>4sR-?LQ4W5Dad|by>)xWXZu91=m64m1G#psQiU9sZFW&eoqvu$+F zKk=d2Ssb%@t%sg$&=MFD1kPR%0kOWQl<0U}ata750Y~MPT-64sRzyuw=8~rn8j{-c zd3R%>Ua8?6BbJqEdvG6%0OadnW~LSVGXDMh~Ulr=NJGjaNf0SLhq$MYjIpyFgN zyM>RlUaci>l(mm)hS?Y8&^NP&GDn=sT)JNVc&(jlN_>9oU=`-Ueteu%{w`U?)%@JXR{Q*T? z^Df6|HA7-vDT;qLC>glBp4?!4G&4@j3TXg6@!r>mf%DdtmL;i$^T7s2+KFUV7`-aX zjP1w)8T^@qMW&jwnA4Bv==>$y*t_c-pdSrH6-Mu3RO;8h&WBmih7L&k<1WD>w<>Rv zcjz-wh~im^2W`*E}jntBMYK9WQD!IvA}$tt_Qxi9`R*Fw+99P=s1&~iP=15w3+-4ig z!h2p<*5SzOBY8MPH63I8W*V#Pf5e?YYVgeHc^GP7D7fQYv@SoXxA$J3}86?}4;~fuoGJH+Lm2u;FTZRIDmBI1liP^?? z>%-(FA3I~`u(ZzhO?9a+M|&89T2IX{OW|JED4%@o4P!PO_prBkGA?_CNa`BOAk)kB zR-gBIrN-02UU4-snv}xKloOjsw|mz0KZeXQFm0}W?3W~nI!rd;NuQ4%$BRMuu1|qO z{kgT*cnZ9v+~&1Cz14%7iy7tXH@+L<&n9VD)=4T9vTD>iV)&f1zrDeetpubQ*riaL zylH|lE;8n?oqjuMb9o4P-|#g%6KPVvgTh3JH09|5%(aKKE^L#K)9DD^r$eVY+Y27- zO!BW?(9nmx`ed7V2Hj`Y4tUdMgsy7{&>*<_6_LUGT`-+8d~66<79V$Q7|!qSHWvo{ zIy&som25mEP~sO?X4aADbQ!Is-Ow&fk>jn@&H4)bM8MEn}&T1X1#zjm=;0C6(60IF=3mD2c$hZSD;%cDbP#e2T|w&Y$01klLLZI5d!H`?J^O{ zm&p(Y8Rxz#@wt94G)u=F-||kxv!W|RYSeP!HMCdLe+{wW)P(eFd#`XdsIW>M^PJxl z^G*=-g=A7rNtshYSVi^aYjTl01_;&MRqSLRLaL&p-8d6>rmuW1UMO}#Ho8E_C)kSf z2nTTW>8+R^G1_dUymwe2H#ze8+UyINyTLhybSax2Rbm|UV%wo38~=y9H;;$9?f-@= zi4c+OODL3mC(IO~l1SFb*vU@zZOXn&%90pMBC;=oFm|#H*^Pa&XJ;&9W}dG)*Xug3 z>%Q;bb3gw)|EAX%-{m-t_xcgb2vuk{MHpJ_46(CM)I%NeQPI2~X%=ypuQdYCPSNWM4G66?swR zWHl^xcmR~hKo%{xEHHfH1FRi%kS83fD$Q80?ydQnvWsQU0h}9{#F0b-Yk0Bn7Gni3 zSN2V+k2d5>lv*MwmL+e1&6M~|3scr5V7%b6P? 
zDjnfG9gt6?(w~rzyL>mP&Ch@3+iT*&8DZ<(I*q`N>5%gl%=kQ!PY6Ct5D^cT`3Ck%GhrOf`Kx&xJucrT+K1?t#%sTiu^19yp^P&q+YiK1Gz`H02^kPsySO2NH(Z?M3-ZPm7yGr zuyKMnc)GWV@ltP@d<2)+7y3q2^Qcu}Ubhz1)pG+U7Ji~=H2)KiCkf}}j%S|DQ&iI~ zhB#ocMRjrI?AbNUfd@Cc9n#zn(u=I}{aWhPmyC|^@kjmVkxY z3dNJV^6kRTD3u!JuDXZ%=Sw&i_NO>m@w)q-&ZxNqBx;Jc)}J)TcV^PAjYhz>G zkM(}cU!$y;E*+faRD|2{)pYr6PJ2~1xpddunc!PkKqBeu%a??N4i{PPDSX*PUAmkU zBu6mjYrh|krstqc@2gX;%9b)wm<(2c?7VDYs0ki1saI$1AbCrF z=ghO%i-5g4nQ}bD_P*Z?a}&7ffd^OLU7~NnD(`~L(C6w60gux(v`wDfet`d+ApI@2 z_#jp#wIyu`0)rm{NvhU0TXKe%p0OmFdEp{cNHj^BJME`C6&@YBvi%68$sbv*CSJ&TmeiWR^K4j1 zerqg{?sDX(b{>BJ;rOOdhb?qVkcz=FUM3W?XvP=IG7yy|I8@Siqk@p)F^o=>ns^z4 z3!5d;=B+zwGxt(afUjZnk1(=LI@UCu2~~uqo8|_^Im^V{deOx=?!DEx7jp}zYtigp z`}oW2Y26LVkJw-3fj_Mo(CdV?wQmUHlmcZOm{qWaoE~NK{^OT4KQ=rVali9qH6^9V zdF2W96ot0woc{TBE#)SNtEHo#Cre>@!pvI!{RbeSb|GEOS9X|b0O$rT;cmvxGF5+d zRUwU~uwxI3AP28(QM*R4G4j4q9V`-#O~p8$-DAh<*b+pd*d^|}*1kK_= zxgwN>i!jH>_x+#{l^Xr3*Z?KNPQ~|TPaiT{iYrQ&Sh!W3-(w#seu72DzKDUC%K+v} zvjf}VfPwpQtaU&;S-dN;v}$<^LtP`KvR;89=YLQ9jn7-ts-PLgni5 zrIg$Vl&4cQ%N8?YT2f3@=tJ6iGrCeUsBjK?$~@oisRi~t2RN-l4*xj>O|LJ1a$%>g zOL$63#7wEqzjAz%cKS+=QuN^=m|Xk>T9h0UMXKBZp?N-ow2)UxrBM-Lhv z$qh%%di!%W9pEokGO~45u@z~du_LOD>w##^}icE<31&hoY> zJFQ{2G(bbav~aq;*%aaaLpx| zj<9itcs`{F-tXKawqiqs*=7JcgeTnuk*p5Y;mrXmxl8_ZLgI+o4omc|8+fweyE&ie zMEL;2wCnq-9hSUgq%qHQI=t`{$HHp(=QdS7peOfQx}_)EK5biIW97}uqnNrjui8q5 zF-g+5-U$W--WtILu0A_llkdxkmtO1F@|p_X9DYZ)$1nZu zdl&TMpoFjt0!k?|JG0w&Bz-GhE(K$F3kaOF0ft&NL1v5dLeLUwEaTMC03 z?tEP4+Q`YqVGCFnBsiD1Ws6yf=>{0}IeTM$TeG&WG?y17YA4N7-aJ0l3yDlb-HiS^ z1vb&Op)nokbheFBvbtBi>`lj5bGVlyk-J$!IJ5mdJgi4!Zry-x5-@}v?lB~@EUF;p z9=>#b8X`&%buHORVlGgcJ43~-nc-6hsdAD8ujnCq`fiH!h0zrH&pprU2jhsJW{)4m zje}KI62aCb@kF`Pun@gNBSR*jghk_&dcXUq4P;8Q5<(X5LTm;B&iOjPWQaf+yyKJ> zbEzxN>iRzd&Ji#xxmx84G7isEzjjojJCht>t?QQ! z&R#LWZ;DgI&}KexJ)}aj23FRa?oJ4_V-xRFh+pMud~DF$QCpIe5#WO$wVR{-ew=+q z8_w5fV9HcO7a8Zp`R{=9u`JtNfB*mQ0OzkeShmc?U1(t-pt_)dNhG;e6G#fZcog*o z>s!#ZKKv?kPd>+F>*ES!n=q?h|t~WJWEn!--ED zB@ARsdwYEi$pi*9?A^=Wx)MLs&3fmZi|c&M1}x1Bw@rep?iKw!QdeYv7x2}Q;M6Z? z#f+CB|JE%}!lB+HcKDWlm3Jx+$?=Z__g%(>$)5Wu}f~_G{n7CTlcXC`5UX z#!SeCm0+=b`?kdX^336<_^iIs1;({z9C=(==kui)I1rj#8TNfz?TRDY5!l|o0Pwa` z&~kKp(OZ*{XUAJDWYn_cI&DQc$j~il*OeLzv>YHI6XDM z$38p%YOBcNbnaO5TY8pTQDd0R7pbC zkMuaLpV%iM7sIIILucadFtwfjWvMSUCS-pnDMc(TJ1KPl=(Yadjokegz)3y{{&X}? 
zbtEP*e(~yOF=JT_d&jMGYuO)_MZKn#(~l}q&}FR9XRGjxuR1uHQa_{Qk!puur&B|UE#y<*S~We?X`W8XhVFWtLuGXDO@HbUKuwMAr~*CP`Ed=M z7Mk$snYe{lBf$y0;4gCqGWnvhYC~aFMZ*8nRB#K!ntl+j0G&WmaV>cL1bae1D#$w0 zK9q;4c#LsJmS#KoFICn-V+M;3Z%;G%tmW_H6!yPiAW4plit$NVIHVXq5%$ect}XU* z9ul@yE~*2sMJJNjb#yml8|BO+iuS3-Bk&_WkE(8q3&@b0DOPvPB%I88*!#djKR=E` zBi%!2>F4~O2h57q(2nCEYZ)m=RE};KSaz_33d-@*9{g3GKwFiCS%=M%pt7740^~|S zr9G};mUMePAW&~8y?UTzc!H?|s1ly1&g;NP8wejNvBLny+e!D7wsrY*`jR_Y;5+{} zE4{vAqEzNm>IQ6soV_L;E4RhYzEuZiCsJqntc9oAPxuUFuNv)ukw1Yb<`)o605^)1 zQi#WtSx-K@IoZ%ml}28B`qkn@pFNbo@(BVQwCgZKSWt!!@N_&t)`maJ&-m(3m>WC6 zmf-(}ExFYbO(eI}b9hzMYMqUst^Tviw8s}3A6#M2Bh$F*{q0QryJ&ElB}eOLK{~~w zZJOny%O6qJ>?)54&O`VtRxq89SGeLDoPQ7A(Ir}OCbdK|=HCX2kGWa5giK8v2sb*K7NAGnQjQu{>Zt9XFxpClFk_QKVK&{X0~1QEPkv$g@_+FQN5R5?_yItkD{lxnKe zP?P#9a)!g3LF3)#8Gf9B<+xC*BIwqSQ10-x_`{4%w#a>|hn!p-=VqP9q_JHTAK!V9 zjX1q4LejOV#}rZvva}|TAh}sqq$8-P$~3M_N3}%RdkQ=NvEXRk_|_?AeOJRy-rKaFLMuMZ(?CjkQ<1x=r_S_%=y_Xx_W=08An2Y z4Ey6iHQswe`!JbZ*5S0OI2P#DamTS0$NmfyJ&EYX3~E-^6Q=)?MAl#n()loVZcl3a z+>G87QDH4nUbmU&8&nWM!(FjVpz(VbHF64|Pylz<$loF6mwSh8qPyKa&jaVKMKa%# z-1yus($Dm8fU<91|3>md3P9eDK~AhaGhn>J+?xDwtg zVs}d3s>D2Ndz84MXZ>~@SV-Njx}t2R=STfjM#v-#AO(;xh= z9VKev?%7Y*6f1~>{6m+8n7W-&(&HYNt20&XZgPHj@G`k@Ev$T4N-7vVSixD#nAg%$ zMl~sA8;Hz4c15)9A?t7jS zV(;W67qoT1aVyQ)_Xy4Skh{8<1p3@WRyk+L(+J-6-B{s)OqSn;|5JUlrmU~<%3mZ+ z0S_z_+p?ueSAc&Ackf##yu<<x zPs66bD1ko#Hb-9hz>Ct~SGFe~an z1zDL!cbkTm;Ui!`nm1lq)aP&C8zvMeh2yum%T`l??GOUFOv}vt0;i@A1hj z1eZ(ze+j20R~2fa`!jey$4l$stuqXG6Nv$jtlCOiGpSx%-v=OH+&)LmloqC?#AQ_c z*t%z@>?XnHnm&|#2~6GyyWLywO3J=&c?Aq2EqP|(ya9 zj${7gm-f^70!Tnk>`!|}H$b7K&*GchkmV`2Pou3vc^stGCO=UIo1mQ9R4QXoa;j+k# zcZU@2+ZVi7$u4r*kO0ke**)qalC5(C-)%GGwD$4i+XWiiI7Rs`0dPcNh?o5K`{+_ zJ~fXqvajdwN}u^@x$4cv(C)!}!!V!zAU}ioFeE5&>YfDsc7W^ZNJz}r>B5Yw66d#) zbNb{(Lj5hO3Jq<%!uyskhjc8Ry^}Z@>b*@#dpuoRe5EQmSV)<>{jk3XkQ)pWGrAEJ zPk+cQDBH$Wz38(f)voF5QLWuP_uS)h$Ou>gH?|JeM8N?u&~j@>EoK>4;JP>N}%^ zxd(5y>S{6T!AHG12eN&X$Jy6bT~BxG{{}2?{sfltB7M1&g!eYP>d@P|&x-F=7aWOv zZdYDffS&KLPi-^bfE7m?fFui8;B9)Y2}>gz!GN1+>rk7V?D2fqWJXIW90tf!Z!+Wx zZU@KW=wI=u-?0m!@t7`M5+ombt6C9@>+6BIKNrS=>RIc9sm6HvQc*#O8(&0V?@lFG zrP%>eAzi?@-{gUg`(-fCk4D&QWv0U+d{ciY%zQQ-prEFs zyLKv#bZ$@PJvCy#>0dx0_J-&A{Lv|%x>{Uc?qY|;*C>7#9%xE?*7m9V`E`GTi!&B7 zlWaKrynHeFc$K> zHJ}&(G{{#IZ_G3u^8I&zq%KP)4sV`*EVUhkIK+VW?+22QMB#)yA*|y!JV+ByKMUlI z9F{|SXXS^A_NN498E#<@+JLZXQ_=+f{^6sw7|p(7edm<3tLhArzCu;4#_F6D3|-M)%<`0ozvfa zfT9Ow^U0)2>UZDzH`7jF&8fD~J3D;>{yk^y+VMn;q_mU)30;7i77#+7JT7LDkTF

ho)NU!r^d z_NZ(U`k@k8otPMg3(H7xo(>+4wK>+TBM%oDFY5!8J9>s1E2sR?;bgmZL7~Hh)Mq6Q zCSI|IQ7kKs&_wZ>XE)h9)aPO~9sEfY;TqgKiY}uL;toYkk@9N?%u>^k9hY**eYXtk zH0HhNc^o``KyCc?m$Z4pA3GGlyBZgkP0THbuOX4&5)(%$qkHS+IR6HHVCi|PQr*7* z%-6oumu=2;1+oQ+U4YJZz2)WrPQ*j?qu;?(d2MTG>;r3ny%JZRveVOF`zz1I|z$-SG` zl7Wi!L3kWwBH~2r&Vchl=q>62=Ii?SojM6>kxv%Z%(y1M^L+`baTJqjhn_xFBm@H& zRwVRh437`?xvw`EAkWYRYZAoINTuJQn&d3;uTF3qxB+%e1exc0M4Rm4MvTK_G zCT~4kZ}&hRWU^IOA%BFs#Kx0zNLH~zeDel8rrA?v_6W_T;dV7e?pqwxd?tAveXzR_bbfDDew5GOE)98m*FW{dhAG@b zJ>DhuSx)njSWJypyCQJEHfN=c4o@{dE}N)Xl;M)5FG#i8>GeL|4j~`^p+W)9RahR9 z;$Ca^UqX*Owbe@VzVQ`W%%?`<#qDl6_`l-k#y! zLlv9@!11RD$t0u{eSf)GyYATOVYb7X3=rEl!Yq0Xocyx_34l8@M0Sual233)^e}lHzw4MO z*EC=4&6G)*eE?f}Fms>$?&B*`HygfJHx`;o48oGU2>SLYE7f1F&F=5?FW%1F*h6<3 z9sfkD`v~{_%m4h{EAsb$PWXLI|M?AG4TUSf$(>I)nH>P}Was>B760|cngoQ| z)tdQ@fcgPh`JA6)4_dN;?{+$c93^tIplzq-=_W+w<*y$Mo&4bRGqUf>B)62Us(xNj zLz@3rj@bWtQ{a)#R6XsM(_EbF+Hhqcf6n|xDkVYS@-LAv2}TeTe^+G8)803&6>xX|Z9ce- zk1^DM1kfd^rw~P3r1pg}}z!-;0*FAtWFiza-0hkCoF6k;3zoyk6T zX=V!gF4)`mZ0e#1r}4|RV|i?7niv**^1Rs5);#^I!hu z0h%s0^yCzpO)sW6)9!0QM4ULsSB@hsYzYj>JesRo#w5%o9st2M(Aft%Vnk@SCU;v! zwtu|q@FnZwyGOCqJ{hurUBb4_NghmXa(rx(GTUfR&b6cPB#;aSF>J3daA~oE-QKtw zNkSrN@HAFnx(+H`ee%X|^eShdl>X2ZyBZn~KFm0wu4u9A=YpeVOwm{(#mr$LCQQq zM(3X@;+ol?P>}R#$ePf&by9-sA?s+&8MK^{LB{u0b$N1k7uu)vJJh^+i{)+358U1> zY76aWzesIMsrNP~S;0U`@ql&W4LL3}fMS)rqe!WJi=t{7b$1RI?8TYC_04Fh@=Fx@@!_KPy1c;x4V5nZ5rPbrB+Nq+Xhind@&tu;;RSFPt9D$Ltg z6=QW7II}KYeCp1=@>N!26S~tB5Tubyr5%rWs(82!!^e~o*STq-kML(SG5SZT zEpyBTlN9Q(pUyH!RpRM;Ra3W6{?hTkDRpj}87ZkOw{Upzmd^dTZ@|p^%`=|cdtCZZ zqYm5Dw!vnXvo-n-aIOuZZYw^m9K*)ZM19}np<7w~#xaBxsi6@5S=?g{SS#Ra3kJMMUDu^CrPOgfUEspZcB4mJ3q=e`WMh$2Jb)FmC$ z$n~gCCq*HGB0QgyNxJcj`89r+#9Jnt@-K zLge$EpZFnsTr95ZQeI1<760NbI#xAr*_gmPY23*QKp+dsRWe;_s~QsYvO zUl8F5aw!;-i8dh|cmZuz1DQA)F8)i!tXL}ZD;niOn%H*FJEkhm7v8Z&&kp1y=05B< zTA%H}lJ5u{ZNGkdezJF+z4GYiJA4CJl*6J{j?jaZ9!BR=G?M-eU&^MgG$&i?!GK3&-)$t-#XOWswm)n}jo z^YtqXP=H5<6)-j$T2*h}A=`2VTxALxhvsa!%Nzjs938`)&#Q55K5p9WPD!e~lxGJ4 z6_@Wt{-IFR87|C{Ac;XM$S1sp2m)v!MCi(W(%|EHR;CGoP02St=dN*rx;?`q6*Eve zxN3_TEh}=`jJ9O}S*9Q2N1val)`5(hib z|DhZ7D8|xju2#P?^9Nj3ThACvh7e}%<*X6*_g-z}wZ8B^(<9^marO5z^LL@ka?(E8fWP{%{R}ceWXt^mT2Na z?Scu+y|gd^mHhQMC!{PT$CLfL(k5xwK0}ay?yX8d|KZD0H1QPl8>qBs(dD#)k~)(!Fy?&>;l^<&O_;fiPr?TX ziQS+mXASf{4XhB&!Hb^rL=@^hE?5yHV`H^-|IHtU{-)bVXYK=NOj+0bQD_}r{p1L8 z=Hv+SSF*a`!De4*UMNXaVi7;QCwPq^RR`@gDR?mOJn3oY{avcMjsBCj@zp#qlD2hI zYK}PPEvJEN!ay>Gm8jCl9iiWxPge{Y80|`Le!fGv2)NZweic7G*ih!XS9N*i^653%yPRE@2C$xQwZ1y?i$F_c-Pb}MNl_W2KEvLKuyMX|fO^$Q> zk(=$=0W)}sPuZSV0%xV=*>^rlIzWp=GW)h;p0J2sbd|PuU-3wN@07nXT^OvFK=$^C zZ()%@alwaNrt1LoK!@Ct+u=%2E9GbuRMZ>7-ku}H^B@dl@f+Cp|xW|7K=`s}&RH9c)uo=-hqC`QBEqcY;DYw;Lo`^@V%?7sqmCfLC|kBOjgy^1w4*c5B1(G$fes49%RE&TRPgRC0mi_P|46_p(~h`sd6UGs^2rV~9fne)gC9e8ia@^=#w-H-1-O-LGW zw&rvLKL*w!P=IzUT3_{ilaiGt!@p%xRJ0tdaHbEgco?Vogy%W&S%40!wqjb%3C=-6 zP8#uKlgEh{@jp_W&aGQSFE{qa;Sxu|ZBnc}>vEws3u3zXb9HXSq||SBHo!y9l-hL@ z={*n8Y7qHSBDMW~BdnksmbLMY4mx{*n$lBG5dH>d<$`LkS-6059F2W>V|J#sgGNJ4 zF4l+4Y;;s7=M1WTWb;WL{wiEOlQ91OXV=o97>A5N{K4_uP0Y0}4kRMz;ZPKZ>(0nr z_CHvYYFyURWdYC8>%prni&|OGBl4R2ffqJ7{=pe!4~>6dZM>e^NYocbN3(hseNh;?A(VqijoY30$@$Tiz-h zi`!E1J~oIsil>;o1{W^d=dUR%oP$t4FN%!0(iNRA6yDd^I&s4wX=LRNjcaA4t{&aE(ae9Aj&lcNB#1C|y z{-U>^Mr0)>+VQTGGCr|iBPd|hg$ibgK0177ggpNT4^!p1uHAp|Fa^K<7Z20e2IBvw zbqT!)gYT7+t?1wuAvONNhjdA}9Rgd5KQNjT7;G>;FozVW18?Ik$y9gozr$zA%6MOrKUGA9L_Yg}wTIhqbM@r@y`w0B{59Z2E}Zss zi{kzr-y{r=XsANU7oOp8|K6xVJnK87YqbZrRZVfGzlUDlJ(Swq3|u_pc z3O$G!} zl~e&R4O$~sg89b>QUD&lSx&dF`Z!2Z&vDGv?J9L9TKvjW&ky=1Bzjji29k-N*y)$- 
zF`aQa{axhV=%c|PqoF2XXa(kJCq}l@-TDa-DB5X#+R!n=F=)w6zIKW6yA^NbJY`c^ z)|!T$YJjh{0f7lb!C0Ph8A?Zp6~X=osTM@{mHh29Hk|`% zA5~}f!4+xK&@$%hA0d_o0bdij(&~UEOZH^RisGjU#SSuTe7^N& zz1Lkad9$OzBgqmxYwsxWa6(+ZLwM@ChI?%TR()K>bkB*%JU7LiH72Oi%_P{`x4VE4 zdG7fSl_<;_)nlPp=>KE(v%h(XLvy|e7lfP5rT|g4tzGB104#8y?%$;Eu4mdOw|6u!3 zj+zLz^Slmh4YK$+j}6G|`d11&+P%|2o_QSf3BMZlN|e~LrjqO1lj#ZCn8za#o0hpW zmyq|JazttHnPNVx&1k3WinERoZPRTp8UO0n0p?`{hxP-OI~yZj6eAsKA7vFh&5f~F zvl?Bxn>{<`QctVIk=8=V5yavpV6?%Q6W%|B@sjCm&+?{#3 zlU?NGmi#T$^3@G=@Fz9PO>)V!lX1=VBzS`5)oaO?YmphAFE>#Z3%X>Wqnq+Cr3XbK z#}ZbJ&$2sW+*7Hbx89wp?mta_B;#E0WE~?R_pPTXffz@j_MUI{*tT09RGALFTSk*h zs0tYxhx(s&J?1n$DMV-q7Ad{Li?!<4H?5^ixd>PkIn3uZes4(rTiwC`CZPK@Z1?Z& z2}WiUiaX993)tyPml0+%H}d8r*$f3$j+OJuHq`P6g>85>TxiwJzu-OgO)wvR32Q|a ztLMj(ew&E{gxOMDtExt)s%q%{9p#R9EFjMbe1= zzkZ~QYm13dfWGMQ^{1PLH4{Y8UF zLcXW@Y)UC>!D+rSR`Ah5`S{6N%Yb}fsQYPaNrQGc`NEBS@UGvvu7~L+p*g~9K0zS= zJ^(XX{Dv7H>GFHDlXkJ?hXTdebqn)svaz(tNUq&%(e(S0YuhhJhtmj6?v zC4R0OzauXq>UfoPBGyAusmj|y2(P!$Cs5&E?}w4lG5#!nxw_6j+gKs8yZmiSuyHBYIa?ssceRAw8Xo^ z&5Ez}g91A`CFT;4BavthF8cbbD#1bi!O2cKmCoH;WkiLq<9=7ttR@vZqQeW=pEQv! zU9msA8pkJZzQGzDuD{aKr8Ursd00}iR#j|0J?Fy9H! zcGFUVbQ8U_^&_0>2%-*eTl9X6AzF0}%rT<6?+@f6UG^D{x{lQr$oT~BKrMtPx>1I7 zA$&xab5%tiQpxAOI8v6IpdanU93eGFZ6V5Z!2?1I{rY~S_KNYw)itbk;2_@qG2b>Y z$)b<4$XEYX7TMt0-sgAsob8wy|K_s#oEnayP}VOv!zFY>rJdBLVpss~#uQ%%Z7Kv{ z>u7DXYk{b2Ujl0TpT)uJbH&2(hq!#avkRrWoY<2&WtM3GOf>$6l__aMqnpQR2rQhMH z`ai2JH_xP05nYCUx;L~&wwsU?;;u5f3XZ%sRDaYbe29{Gkt46_J*NQNn4JVPFQT}>gk>^EvBM7OrQ^7V4`M=UHUtlD8{d+2CL2>M-#q1n2{Mg{&v(24SVe8F| z4f1jK?ZD})g%+O4!S&Kwgu+I@;8+ts1EKtMhaRcg$}j%-bg3*>U{qJleO&p#M-+eI z;XKj$*Xf0=T)77GnzS#|bZ0&FA0{g72|XoXj)c_n-nTogs^;BTtbcX2!JvG7iK~^t zR`qrSLduuR)H!Q|!5@6drs_fP7G+~x3WNxRprOc$75tcwdk9LWnCG{bF$*AM zMK942(3}wOn3R=pPhF+^LRy^tom)YIx#7eH{>#N|HGxj+{nUo4QV%1)ulVGF+BU}@ z#RNI%JJHmHb~@j7$hM>YdE2-AhHaJ-tl7iyfEyR|X~L8jX|teU^EhU)M}PVB}5Y_?IRVXcuZmPZ@$Z4EuTs3Pi`2nz|I8Z+#e{s$a?`(QpOjyTNNe$dpn5?Zet9-ftP; zf4o1(Bp9@eSl%H?Nv1QqYj&p6jPR1;ELq@L_9M&wN?W3)hw|-brAAv*fQ=Xjbk0W; z-ECRsXdiK0P0vaB2vOF1D8;+SW;nW6tC@?>UY#}~%-?&v!>}(H~zIv+dS*h^g zg*WexKde+(QGLXUQGk(04yx^F5HZvJOOJFas1#$jOTeS9lQB-pgg6_BpY=&JRA_w^>E9ecFn(%Ez+`ERK;+R<3x+U}Ax^*yZdH2tGjJauqy_f~2+@lSnq1 z=Cehj>)@Pa8Ar2hiCX-OX8yT+pX8sMF^Wi|C9?^SjJ+%-gw}?FX%xob2tVvR(AUnY z7kj~a(?I*prWoRh>#(9MZ_kWQ$(rNQLdJs!EJsOw1D>G%GZsA?s$_nqnk4e^Ue%`$ z26wz%6YWKl>$S89-}W4f2-_bzy_iLJHvNUElIK=M0xL5Kdq%Vl7u8gIn?^MRMXUBt zvQGGutkV&MQu4}&s*4*bmuC3JK`PQ&gu4|*Tp1~gj0rFOI&B6kaAZS^NY5vX`36%Z zu=Zdt`;c|AiHS==9Zqq#`srbN%+%b1EW36X;WxC&GYVHK+>VND~z4Kq%Jj%1mVjNp&fs<7LmHSEDYGbsL`6!5auj;nGhZzO5 z$J~9VyfqzOy?uQlO?-A-l@5+!DjFHYAAf9|JcGVH{IlJo4OYxT=~qg+o^o)t<_je3 zoP4{Wqh8wY#zVCclbKl(^+)N?X@?N5IMga4=FQHCCA|?iqJPC{AScCc4(6a4^Uf1p zi;uuH4X7P3nLUN3SqI}a}`@Xeg)B`99pvvE4!3O*L-qaAuTc# z`WGf?er-n(z#!>$X+TF&iV*BaA>|0VW7c-Aa1HBJ_ldkn$X+Zt)=JD3wf5`s#y}NP z{SpB>4AkZ76%O!X|CrQ@Kl58sOL%?j_ZpDMl5dRYOC@)JD4&z6V^dHKd2oFKyCKK! 
zUYaN??e0iR^}mO-0*(4a2O}9g=eEKv zlmgQL5%+K}W%8#DavG@Lfe)U`1wAK%zrAVxM`eiYu$|2H-=6uKn!UfOI&AwN8~x|1 z4z23{RMjE)o=QmnExpF$GzZbA-3|n2V=vne4e+eYS4NLv59wD;U_!w)@g2vxh zFWQL32X9>y?LcZ&maTD3)%k>V_AnRY8x(69ecJ6qF?KwJyUNoS{h_W>On@8n=_ta< zZlb{vh6CE;aAR|u2kj$coC@$=z~8-|y#_mZzy1>mL-CI!Oly|zTC}?P;ZMk>f0c6o zlgO0*OI{$awR_N)12um=+kY<;%2w=UCGYRme8Ke(%V0ua8b9q@^~FXIZ%Gx~o2@@Y zWX(^Z%(q&>r-iQWig563Juq`u6C-rgU~cDDHR{WQ0v__kel~DfBJWli5RTMc9QZsf z{Jo4s;0E2}Sam(z;SgIMbomHA0%g5Ub^WpNN82y0Zg~M}v(jz_q>dpsM7BPk^|-+` zD|GWY93)4n`$TmUqkWAVmWWQ|yM9*^yI~*ohZzz$i?|X>5Vi6^`@==1su@(CkNM0_ zjA9s_yr|PtJq{*zMDs5Fg@r1R7{lt!;xl9$<_C7i;3JL%&9w_U1vF;^{+47v{D(H4Qr~}N za-MmC^`iKR6WbT68o!o^=@pv)nI5|uRlp5)!L8oggIHl4udlj{3uiGlg$pEXtllM* zz>hVT6q}og$Pw?+Ds!KYuBigeBd}itkhl4);p_p13uudI+Z=KIsoQ(dSyl_*)7^A0 zq-q7vs6YPY;FI2cj@{qp8bKs!5!`eL39o-;%fw`wixz+V5W_F9B^qoUj?j&bOHZ^rp*6J z9qlur+1|y9+2g8S)w$!t@Birwj5SNJ5x}qmr6UR}%OE)iJ|$ipQKohCLgT|ZT3a`wj)+kxYT}0V55g>?wDb`~?Ue_k3Xy zV90>t7+@RT>TYp+%M?qgWRlv_xUE7ysZb3#P7SR!0mo^BXb51_U6-Yc?c2Qb;Re%I zK&pam8f@rLm+$M5!7YM5DW1XwldX8*Bt>+%bvsNA6(Or0lbO>oy>h(3kSz6@a=raa z2B)DsoE|ED-BtF)V;B?yG;L#tl)(zx$&|L!ZMJm;KCid52#?mCUkQZYe!`=)bg@AL z$l}C0(3?ESuV>mmvlq=qh^>{ex%rQ&wK|I$`! zX-3)lFTO%q1muWK;Ixy*m~||4pH^w_0gyz(H`McEAHHiGA8k6uUOQ15lac2fQ1n0HU8=+XzeC7|7(AromaL9Kt#o)TVX!62}dRn@@?#+GG@l;de;+gu4z1IYOjvCE;t;N~HIhMu;DvmI-B zr!AAi7SV-n!d-4fRU^Kxqr!rE>0~BAMhfr)1mt&U)$agnXFxWX$N#bz+-kJ%h5sUk0 zx;eLtcpi1v!dzW(UgnmxDHZ+|gaMe(FBU)M-SywxRJC9)#w*Xe$9s?)Rqd3CygY51 z$3Bm+BgUxAFa1}Od0QI9v`o`yV>jvp)+8B8EuaQC7$)w|u;*8H+KFUGxZE55UJ|Jp z^nB1YdDZM25BE_cM7X7(+vec2L&8ln8<>`V zW%2`BiJfiNDOD~Zzs{mUls9@=jO4@m2Lll3)Pee)OT*BXzBK+3|M!veeB^yuUZ_VB z3>%|^NF_U(-j^-y3Bg`Km#r5W&3S>vknlvV;H80c5M!oI*7C_{Y`uhiu%$VX4AKGy z_T|azw!IbvVwPlt1-K`kovSMdZ+`_qKZqM>5)p-SaR5Ng_WVz%$0Ol3gFR=0B?Zd z6RoG9*1@DXUu2-fQh5>4cQ6eM{EiKMj25~-rpW=+-e?BG`Q#zT6)uJa(U2sc3Te7bXiXj zOb+N9{tL22%Ehsn#PAV!p?7OVxeBmA91g<@75Lnph?zCI9@dc<@O?xE`K4VR@TeBG zn#;O+d@cT=9{Pl=$!qe#q$U-jH#dPbi{e4%jI&NTypueWH=&HLg}J8)zD? 
z42T#E|D2kroZ4ZJ-7`X%`j~o7-BMQzT=>b`*mJXLbWm7Dh)~_z-B^~?o`vG4aVLqV z=DK}S7n-cc@%`Q6MxaI#gY2Nfy9%N1x4;cnR$c@@X3AqgR0Fi-PM9{wybHeB8Q7Ov z;RZIKkG}MNR?kR2wfRv@B__rSU``HdE}VH4yd^Tv!xUj`{6O4=}T#cWECjb~~wq2258nWbkVK;78PFtEIhM$6+I`$xjKgLv7~@R>u_fC(w**56|p!TJ;RMaAi%a}M-8T8C28?fNbn6o3yaRZ=+GPR zBV0fPyKb|;eaS)7`ikJ_t5otZ9ul-;c!I zwylFHclwOt4eXzW+!P@jO57Um=0?O}uaqV*?Cl)jUTlp{xwqL+FwGW3ZI5q|WoaTq z|C%?u|C~2V$c~ET3HEia9ihv*=3Wr5@Cx&r^Y}>8Ai5slYLIPOn9VMq3QhVn-?5mq zBqiOVn{iLLU8{8XTp#gr&m>6Yd20g>-pB86`$ko!#Igz~PrqUEkHtM^t&=6~<>2uB z$lX5~6|u{Gw9VzfU%FUnyll1#p-kSKlMA$+4784DH%>G~M?H^o>PnSiC!Q~=`sGFv zro=dOkNlopcwTOwCTZCI5cFV6k^W{5Qq-EGPWB9mLE_k_rG_i>+{bURpTQ&?v8X32 zC%pGIH=$x2Rc70rR(=7GA6$4JduQwZ?jfkKWA$}snOQf3Q^P}$uen1I9xDIoC(t|J ztE)}yx6|G!$?;>pfT?Ub&SdmIKJq#~%(CVmFpl}ECvJKlJPrdzj4w!i7mv_@%2j`T z|5{pPM!CNFXk8PB&cIZaYuN3~<#GqcT{eiqvP~{CK!VUG=-nE$Z2Q$9&{%}Z?1J$> zMgw0;bNl_xkje?7iP?>zrJvk#N5%4puKxZ6{CpdQD}5#$U(<|jvl&z{plwCf4NaJx zM;*#Kru%_Icd8M%ZW;!Z;PF!Q0THQs^Xt>3^G)Dq$v={=T81pkY`#{)B>~xsUv+u^ zkE#LwX~V-vec)~8#?@cii-PH@zz2`Jt4yZM_MA+scQD(0IJZhHA@Gmv4h7N#Cg#2k z$PdpD(;`RAUF!BzTYFJ6^-(RkJ)7)Lv}CqF5D7CicYe!(doFuCQKxC|^&4h^A0Hs{ z%iEU~{f7WqN6^A|Ki|Ln%imFmKmCA%d-~hG_m%~KDU5>m%sL!nPle?x_V6Qn88{jT z-)$@Ahs(1N=AjvB0Ad1s7NG#(z4iUh84_ejhxYy=I}rGblY6PbLt--3z~d1}SiSfk zC!FkkX6kXEuR-_9@2w>V&)z5OIZHas|K*Q~GJ_;y!#tP&ZV9m12Eu#6KjJTDcu{f# z5xMgH{{kcc`GfzG05lf_uF7Y>xP9qAqkb!9z#!=vlb+OV{AHLb`tZTwh5_pU(r?#; zto)A6lAR4}MNA{th`DFG#cv7>0}bf`bCm4Z#(o4KULBi=U8ISEeKg>XWiJaKj{nq_ z4~R6s2{g!O39QP{JEzVL#}p_0m?tb4eV1M3dyZP|--jh5|8&}(qS3jO>;EGB=SYyprIc(&)PKc$lsgR7;@AQXaR`TQ z|A6BQYw+mFHXxZ4*$o3Oie-Tk64xyDl_$|8h6?8}ZC7lF8&wz0o}i-ESt2WiCd-qx zP5&l!w(On8$-V5}`{zJ*k9;g@d4SXci2KdzeSVpOLBztYbhs}4bi*5B%ztjr5fSE;A!@PytAP51N4&$A#am-Zi$&sW3DMhH9I2OazpfhK=Nmsc_G@^G&t<`6_h#*#v+Lk^IRCpL9#}%b`QYg*Q|b z?-E_{H~8{U1mj=$lR-jXf%6cvS%+R81nUS9{<$&MX!;H8Ot!+l|8t5ES+fz<2gS*} zSCU*8p574lN`eCS8r!45_x45q8Vy}<-lympd!pwWk#yHVRJ{q9pDO*FpC+LS6<-kf zlb*lhdRBXJy(R>Dq{{Lh_ls@K%An*om(863f$PcZ9qaU4#au6iWA1~v)oQkBh=@m* z9Qmk`N`2h+$N=8pSid#b6=lh7vrvl=s#FhQuLTTgIDWa<)Epm?Rs7 z2=0h~Py^}?<+Ni!*8?18!Pi}=!jE8O(w_moNdDq8&s{p-_v8o?t<^(9tDBwq^v{p% zZPE+^yK|sEdYdCsIhS2F*~ul|AW!dXV9|HQl2g!;(}`ca>7i+|*L0Bu8=~q44=`zX z^bj81FbzD!8(=`WGwIj8X=3}^Ri#aG2l_c5hF-D_$!0VD_e-o~SBrX4U79t2f+`|5 ziz9^R@GR5eRlkhN_j?A~(z6)j;5K$n@9^#$1FDPIK;YwR!OzPud3=9Qk-K>S&0 ztFwD7TS0B~X~Z7Bo^KS2?-_OQEjEp_)I2+09TuZ0Ab1+EyCNaEAcDAQx=6;1>IaxB zGxzEh*VVPW1r$4X)0GLS(4K^}P6`0xQTjy{r)P^=$1a*VY#m8v%>X4-h*i-6MdBGzJ!ul!d!y)ZheC|j8Re)+eA(aUR&c9R*qpt+%(SYMNzXO8y2V2a zw``XJ$n0~;PTteh_0`Re;w0B3iP!}E(KeX~t{ZL-@5Nf4noqHGB+LVg_gOH1C(6#7 zco2KNeB?(HEiUuEMc z5t&!_*R6rjI;G(JcH(8*BlJH`4BuNH>=EJ^e~In$GK3DHgXeACVsT4YGuel9>{7|6 z)9}nKkj8)wShwE?c*j88F1`B^gge_0Sg`%6vTB~n{XlZ}rusMgs`vnB+kanIW&T&2 z=D%H6wQPE~!>%7X;2egnv?IS-OMg6&1U8E6)ZR6u4=|CmFXhI;mdR>6Jm*(95*a3L zTO0%2K#5Fxr*ETTRV_QNWuiStY#|iEpK1XCKX2()e?-sI*Zq{193u@dU@04FA}8)- zlvB!?)u~ z3a>1|GX7L<;*lI2{(S|XN>(TNJfH6tqG@rkMKG3`U!*7T`A{Ec&HVMN)A`&bWrMxu zvJ6cf?YIhel7+hYdHT0D5ScDN$E$@U!sSANRbJ?w=i)HO$$B6#`Uj){mBc@c0REvX zOO*WMU(2o{mfI?~6SdUyz@qiHO#!C4wzmq}6-a{|b0(PrRDRN8?LQT034UmAfu0&` zL!Po6HSUQG>^j>eElo(d^nlJnB z+Op_=w`D!~Q(IOD-j*s~lsRSqs+1k$05y+)G`S$U7Gtd(WqjY7Ylq)&>|u}F89kB;uq8M`V@V=5YQdVcQF-U&1kg&Gtwu{U6(H`xuu!kk znX*v$kPZxb5&9H*Z}-%?B4X!pQ>B^(6<*#u(*aLycwux(7~p3)$OF>}AxlZ;CfD)B zR7uge9lEnu2XibJx2_R)zl>4)SclM=-8T_$qi;ZboY=50fF~|kDczW3dU^_Ctny+tOSIen{Bw@tzE|=>Mg5LudK*OXIXoud7 zRK4v4>F=3>O1`4O^Sc;h0Br_P6IIyeiit5az)|AC_0!EMwWOSrIV|6K_egj)p$+~gYUc7j7}`+rjHtsa5LQUK2KE~U*Tq4;C%ImVxe^nX>#H84O3 z_>p9+UrnMu+#rseqTG3$U~xak3w+?6B<4JG`W^t-`U#q_M+!BQ5)V{XkKH?k(4?bJ 
zBjW*R!ik@nj4l3@S)=rG-7aeDu(R<|5tMt!pRu&JKW3=XOa?8Ivd0TOIns zVIBsL7v7&%z=UV1=@4HIm)s*u9~y*CFQ;OLaLae zLPEq3@IILDN$YIWv>@fq`55d0`OiKGNT?Y!16eA7M&Fc6sPc?vinHrZ;vTfjAqX9t zKpxj6iAWag9hADo5Qy;Aewp;DV&g}p3p8+S+Iqx(#(gOLin8_D#*Z7O7%hbjlK5<| z+yboQ&NfU+{@e~kR?=k-z+;m`&wJ_)Ywa4Rf;t`LI_Ib6$wt;$?1CKlac%F1_{lF( z!A7_H1()jz&tzyfM zzqmvz|9Xf^zP(G&a`^83p9Bz{He-ERjDFxAJKZM@=yRL;tDTQuJqCy|6O7Xn+fvHx zUS93jmIAp9egJJ6S>ZD3guBZknNkD?M`Qq0jAJ-4!si3`vc?KG7l*-bGEVTX(l z&<4;fSIz-+pKN&&-hIMAD;NVwI(S>@1tTZ;DnP{rM~X3y$3ko1L>)P5oHKCg_GiY|fjR*tq*{MkYu(Q7}6RJYPu z+_x$p`$`^KdCP*}nHBzV`otw1U-$49;P+|#)#p|FZ}xeC%5c3MNjVi-?l`_{nbo%v zg{lF!ppe#Yw;<0J%d=l@!Q%&RK|LdZz%?|_w{?E?;tqv~WV~4gL4Td!m3-u7FQqChQH*hz zrj>PA0<)Yd?v{PtdLe;!efn?EKn8ZieBq)tu?RuA^V=pkR(RpaH6$hcvbdgAQUfPQ zSx>+%GJ(xL=z#(4WSNcVyTmJxBXWm)tX|BHY+!S?ckE1eCo#-pRZre};Owg45h+_s zB4o!`TCO+u1(F<9UCy}Ah#2}8-1UHuBIf(eUh{KLK8^m%5aiqzI@BP-B+G)uXM6y3 zL741l4&`|ALYyrg2MjG@5%g|=UA9DF1^m@Y$X(@EPMEs$in2%Y^Qx_Bzg% z(mSYdGxB}sJGWZ}>Jo8;xTXjTt?o%`Jci{429zY~Z`+b^)rULB*{8)29zf*7;!H4P()HDrBx?~>kx zJmz@|4KJ;)XIP`M);Y4C!~p=_!2sYL85QuZooINV;U;^Q2W3d$shj(JN>8W@b&^=m z^?P9PTQ~o`e<#i+sFnVMp;XS(U;SV+ARQXw|IfM#=oL@@T=GRl{IMsuoq&Ecu7BVj zX7LaujswN_?Y;-FK_6%+iGebW*GofeGeAAKZ^P}-bj9Q z;C~>Zs!no&&J$-WQ5Nr<-d>%%J@>~SyT_Z_rlH*W)~U94D0+zVn3~^krUUSy`7Ppb z<1%BBnTvk-^pmntRK$@z>`;TgGVu0}y}uC2!glCP!q|D@OX|J+A3yNF&!cYaL0{Vj zY<1*9${#qCr31KN91M5^PRD@*9O-tGBJoyu%BHsTWPG&bnuwlY?3)r_30_>Lhu7TZ zU!07!`>otWTv7NGf8STpHObeU-@MVYu;u>1=Mjn-X?hv;s_h;=yhj-Bw~gd`uf6Rj z5Rcgd4nVA|a(_6ExEZb*l>E8%ROOskkj$@Pm#|di0Kib+gWogw^@XLBr0+cUrR&g{ z1`8Nq>w#)>piz06*c9@!8~E#ZrEkmm)f?Lj^AnSJRpM)4iQ}_h^!hXI#0oD57XR7b z9bUTEqMyc%c?{igXPz3_WA5l0Bo3a?Ns*@$NIR23+XJWil+Yd2ep%v_j=XQ>hH|hz zN^Y)Kn7f(fVDK=YF!ig`>}k7YA3ixe(0bn>C(=4mNPiQF@5(pfRqc2p%Hf;rdE1t? zeqSsAllEDc&t$5)zH=b(%n6^c4BFbS^=9<0w5~Kbxj!aJx;T+PA{s&Hbh|=(+W3T3mIRCBYJD9{+Wt131uO|n5Emrp* z@$FVcZ{?@pgYtYN>&gP`93fq=ZQE3!e|4>yvOzEp|2)trKa>c;UGD*t+SDV*d)Ox~ z^BTMRm6nkXW6Q2#`(~kg*Yz3Tvbz4TQ&rutsR-~4j%(t|h&c2>>%HXJlOO6I6X%<_ z3*Za;s1yg>&Y_~NKN4ZGqRX)Mice$EE?#ScgeMLRU?ikgH~a+M@J+jpgxu~E-VC2S z>gIoL2dixrT-u&Qn^rgs4zDs^Vbj1o0@k#c6zy~1g~$l^X&sPyYFjFt3cDeoh2ZPH9c$7=x42b1P{ zW!utfO^&T^s}5Rt!!RGiWcua+p5ndz9X{AUZa5D<0G;fhg5tdxhgfiIA`mojRZ5s|VvH{Oy=2}8bMI;T zWT|tTBlSZ|$5l?jU!lrX{K}4h4|HyOr&1GiM%anSipv5uv0T_*za_c&4WM8C>68W= zc+#EkGU8jiL}g@r{nNBkG_d9j7+|^Jk^66={ATx1e(M0rZ}yKUKj79HJI{UKNb=D0 z>pOl7gxdh@9!%k}1!6<2iGCkN@3@`6@5Rl-FZWIO06QJf$5$=GN;-UbP6LI%_T$Fw z)3Z62D|DMYUkeB7|9I$_e#ap+@<&@hVxNuH+H_T51X(1 z?df|;hg(kUHgwx}OnrHe9erby$c}yjdrvo|SGDtpx|0>;)P30WyW}Nq%$@ zrWou_o_Ni#e+IOaHNo@(qB6MnYj$96(uO|bk2urBMmp@e(KeYhp$=3+SjIFJ!YcOf zWYfgaGgHoK*2OdR>7c|h^KXsEIJ`5why>+E=RXjXLGmP?iaN>OYe?0-OC^{AxOLPu zTs|xH>2!YwKMSJvFRZ=fLLiq^^lk6{&!vAF0e!^JHcPZH6Q6;=RM3I=UHOk5_P?Jw z|Fm}allHGA@~^dj%l~h+e+6~EIM_sIJl*+!sp3mdB!zwNjc&>WP<(fE8NDw_IgL5dOY1A^3~Gc$iKGrq-VDDnosOhjqNxeXir+r zWY*lVc=r_&inhYw|0RCGyYkI}G9X8`;LDa71W7=X#QohFAPGEP8;cY(em`Fk>r5xQ z|Kw2+lDSj0H9ZPx<2&D<6Kj?(0@&$4C?q?}0=nRl(xZz*YIg$+F06j74|fwdkA-nV zeEp*DVy003OP0O<;&#He>QoOSexPdbqR1I3r)lZC;z^||`|O8|x4)`MEnJx$cV?s5 ztPrq`0k;o>izmMnLBCQ1poEI;7kj5_+k_(qeG zH{H@B1mZB^b{9eEp!2ibb-O0Hx-Bvo%+^Z)(4!PP!A0 zpAp7fq_|ujRK6jo0Lr}j(2)G>q}QWdC~aG2A?~UT0LMC@rLWnAYfga!gJxj-*g4+m89!JpPdB-=;a zpBGi@U{|8^aN4o2_?fU`p~CshRuat3%h1)dfymm5Hto%%Wm08xmm|qd3(E2yB&2vp zgxEgZtsg|FhBHu-==J>C*4Lg@ZKh#`N z7NeCbM29uA09=QX&CoCBg-))-#N8dvzTy~SZ3PwxUB3DUp5P5nh+%YF`ai4w-Z%cQ zss53L;sE6QKUV!`|7+F%{V&zOofc62+4fZbBm&$1`TC>kp9T+DUaE~=GI_tyTJ>cy zNE6kfR$Q*&;^;rV!zCxr3^>GBV?LhS>q@Pp9uPdn3-`)kWt`M$WbobVL7mw8vJcnx zfkb8lO@&vDk&1g~D52Mr5d6?!f6y=4 
zA7!x(xf!sq(y6(`6_2I;Oh`{!zSZyV30^3?i*fCZCYb3TG2gY-J7lx*2z{J{+N`KT zyIxYs9q?SL^n_cpz-8Ge7WE1ynMn@eIUkS@L$>9nrot>wJ)D+ke?)R-{tf5+yNk$& zZM*L#UwcqEB0pLt&j3y8}KOf(_XAZmwwlDlvXPKgmW_|?0ao8 zAM1b>9Gx^JONV%W)u`?dU;y>d2M>gEg`+7fqarq+?l{84oGq8gj3QHiqsIk%z+@d>UJN89b%z2kEr{@_I3Y# zoa5hg|C|54?w|2LrTaVlMfbnHulo-Ww&V1skAn%}eVj6n8^Dioa2>CfbtNI;{>V2$ zn|>Y*1M?p zvA3rtl|2qWSJ9X?(avA4?-Y11koT!(ZKCOcu&`4GE;o7^TbkY-S09XUA$)K$eF~O%G*R13V5vLU|8ms)GC3msM`+^HuZ7opxiHHj$&xT%WGIN@l=5`-pvfM~@ z%*Cn7BGoA_!_VCtfljr%-S?sjncOr1jS0*%G-wJId&!K`ntwcJXyA+mH%u23UM9B#%OX&!yJ!`mlf40BFeub{K}K z{>Z0fdK6nhc|lukFCLQHt5q{-;lvVeWV9h8P_Nv^9eE^05%OeC$e`2&qYZ|`<&wy`rO0j1myjQ@*VTuUKN$p8O=Tm%707@_|~F8(ia@jpi{>KPL^ zC*RO+CbFbi(fwrJyM+#=pE_fI;OJaa&vPBr#2%Y#taciIa{{HyJ8SIqKo?7XKHiP= za3EOWA6jBBm6L!lxi61qj3fo#-!?X6HYttNIiz9gp|F-)qUXdQT<2&a%XqF94uhh) zGB17>j{AHtjQUU~e>KuF{wgnY0zBv_A)xk)Z@j=S=XL2HuraHr-n zq1bI&L(7~D`DME8tJrC@Z}17uJpUulpCG4HY2e%jhfW8(4V9_pGGgY-aL2 zvY<_efIg+l8xq;x?ogspiN19kl?D!a(BypN{J87Jk zE;g*)QdFMy8Ty{OaJ%5!O8v_z4g3dk`Fk5UEVE53M<+x`Dd7Q@kCk9ESiI;IUh3WX zvHb&!k{TSnGfMaTQq&xco(stiKEB~KKHC4pn*qnM1z9Gf0Uy5Nl%-7J*1S&QRRp${K!?qG7~oUA>RB;32j10NyIw!>ndVvw4R_DP>Ybz7?|1I- z#YB%e5KhnO=6Vk$fi~9p{1=|hG4*X9)xRS&3XKUSOfRZFcNf$l9sU;dVa48UsUB(T z9%JYmy{5mLT5$ZRQ>Ojg=N#Le91RH^6Jk1ea-a=U7JJcPN@(rtjSM_nl8|FQ4)t;T z!~6ptqhr}?bDA*^6`Wdl^A4{M{$PC3@KKTp8D)kAlUi`MTwTgDv}1T^T>q}<1dBQ& z#f3ErIF~YFYklIui?)*XV#<3cW+z^^5B44`ZC3D{wgTLu>-M+!?@zan2(Mh_nMrg+PrRCr)}foK7gPxmv(k5y9$q}z?9~bZL&v?jZw`mcFAbV_@pU)@cslJ zGxf;$1o7)rh~UuFo;&f5ju2D!SlDWp`tty77wd72-F7Yx0PvXQ4H7y(kp78g4%@lS zH%PGgaB4ViRH^_eQ1|uzo4gB}kMXfnFC+B&+ds7{kk+FHHgKtO%{qzBSIIvVS{D&+ zt(Ibp&(nO&W1Fd>t z;;&Y3rcZ$gL%nmZKyQVeyadhA<#RL49>QPf&T+r4o6fEvLC`hVWkFJ1U(pxL3sW(yVxA4CQs{fN04I zCXrZBDithD+D-VRf`5Ho?^I4It<6%|!ijI6z>YSwS~w?iZte|@?P~68OtsVxaqopI z`pFFogCw|2$Xus+xY}K!B9%r&n2Ut3VWOrnbi%G3x8WN4$iupw^Nf+*BdEJ5h_0h& zv~^E!tg&S&-;*B+hEQXkEGT-%&29JQt45xxWD!Gm$RUauP&@b5<=q`MBa`p zff*F0Ya#51{jVekLNJuIGhj-k-g^;v$%jb@N+sxpfg&F+XR<`SMs;KR>dSA|`2!qg zvKN-iu*{9^31u6vB8>iC^NJRuO!@>coqUlZ?c=1thHAIW~%es6vXa+`Q5y7gQ zM7<)PxM_5CW*LmSxF)Ub+4(q5xBL)?heBXaGWvEWieV01iXUb}(WW|o9iOz{+OlJL z!G<+EVXrdnC>xdEXF3pgdaYbt^+kz~DV(VhdZA7ux5tr^!K^!x6;htZ-o#p@L7~A4 zx;dYD-bJlk`Q_)#0@7(_b|qPihncpSc_oO#B+xVum@YlA!4Nb9aO8T z?v}rz81|wTY;~jwL7}mP`CP=IOh+JuC1v@G1;VU z?8D}z5{Mh35vqgJMp0#h5{QtsD_PWh0V{TsvSH^q?QYiwsa^;1nv>@lJen`Hfx8wOUhKA3>P{F~*8_q;kxxRiiOweHLdfR7ni4ybBqt<@qtd zzd_;T2qsNyD{Kf+e z(kPD;<2x+_AH`v^^Q#991)#tmrhqC+jpF;P@hv#l5+ph84=5-V? 
z0{0x-9&8k2R+zwH%lN>hOvWB=%SF=_C2Uh=3Ev{ansjbp%{u}pbxES)QH!c8W&uk!+5=0@-D*4#w)jWwVdfVUN*JA(t;5L>;o?k>~LBh zx>$561RdUfUiY1R1p~o&fOH~loi5P$iJNzFzaQ}Qp_Z`Kbk=}Jt7fMM_ z8qN*2-=GAse-@jNTR_nZy9Bnmcis)DB6H+5UX$SBgG!uGzEkOsfA`E$?j{R(bb@g; z@}~Xx@vde5(XajrrroA;q7ieQ0u*{~^JfaiYxPD310KHNnnTL09eyF<6Hws zq(Fj9x9|kS^U?K$$LQf^&)$o z9-dHRC5xEL-T9w30ZZEK|hNRENAS3LrqfRYwdXXBfTdho0VOjct2PANt z&K~twdau7wrOwE;An-kWs5;SOtNZR1C82NJNAG$M<2S{U=0>8vEzf(&T4cEd$SEj`q25``p80kYuL~o$}Jn{AQmFq?H3iHy|r9}D>K0n zaMz7EZ^{OB7@92BE1UsiJ>8fP@&+gs74Z;wd@%5XEGp_mvNfZ0XNw%7)fi?ke39K9T*fF}g)Zsqr59n$fjq>YVy{GIXzd%ihhE`m*sS zG$Etc83}cD_+u$f$BqtIT1iEUO#4*z8R7Ik$orgETOw2J?>8*>DB|GYpdX)ZJb6qQ zSi~vjALFvNcHh|HT+`70{PpM)V5J{BWqnc~*$c}~XGwVP86Z(dBMp=I}LDZS)wy5+xPkI!3e(|Nuw88Trh_FlH( zL;A+prooMdFv_X{|L~ISro#p3oX7;IoZ>`iNk!Q*-akgk%&UM82H zrYSQSXL&(gLXo~$;n(`E-tp5&%I?bIZMXsHSQ0Y~o~I1CBaT_x`0nxk#6S$UK)C9S zYH=$N%R52^nHwy9@cUL4yr9&EgV}ha`rTr6zAct|EOldj&S|%W$zj@aaB9TmHd%FH z!qm#z38t_q*_Mbf7(Z|MHmNVklF20fMT_rS0jY0FcE3NxxB8t-s);YMC#>KE2wtI% zco_)tg=xH{LRV7eglfx1DYuo9(@u=`+wkHx)JG}Cqq;q2Ad(v!=!Y)y|A+!)Mt+QkRxjDw8)9Tw>aog6KX;_bow_@YgQ zz@iw=-rljo@s+msJ)=pX@yi?mPcYwi*?Jm`>kX4eR8&?ED{r#WxJ}8EhxgN^k;SE6 zFB^bWGRAziRTg*vG+FD(ig3BG>~Ku%E*#4W9}c+#qj(V999Ybx@IJD1`x(=?ao2<1 zy89~W>{nG$#^fG~(dc+(RiawZ#~SooEl4$-Sto^e@Se6k+smDYpQvexCD*pq|MolSI;CWBeG9Vxgf= z5K?vexOuPQ+2N!g0I5y!1 zv#$A)>y&uD(uk2eRPaj`4A2YqwS80T?NU82ftRw<8C%QN+0}A649aP%g0c8~(F`d; zhLLZ@_n?lT2W!xaWr1#A>8(TsQA9-{A==D1dTup7*h;|!lgRzT7L>C1VAg-;+4&Q^ zh_I5EWnC*MNih}*I;G9)k7>#U9Fxh!bIddi{3y~SYoBE7bmKk0)`{z5=2~)!10#aa z@S+up2c?(3TtI#Iwkpu>Pzo@;=N0w?m3D;xa(Iu zQ#Ya(J8?1ZZ$+=5gAEFgOCRPJoFh}cb(;m>_u5aS-qZOE!t7DmmA6n!N~K#ln}JE2 z7qd3>9Av=$SFPQ7kPAt)&0NOq$41?wrJ%0t$*j?i#?e^fvCB3Cj-9s9oS}c3%3Y|$ zs0{)~p-vhypig{NcTv2p2M>bl zh%U!6?9}yzTLu7=H;x~!b=JNqYTC?7P+!PEf!-NwNjfZP6y%`#I2CWm0lGOj-M_#S zF=d8fV6a!kS;{Zy+N-L++7?<{*!g}VaN)4w$i(dJBE$j@^!<5_`$aSBYTr5vb0`S+ zG{aRxv^haxQ~~3=7nO@Ys87Hs;If+eskFj**<(555NLn&L%gQVsl7epg|(5sNK6;y z6k46IeM?0mUIf^j+rr#T-7MWe#t`F5|-P)BnKi0_7B zvb|5|+N2By&}wLHJSj5TB87VemV=6`k6PJ;QQiBtaf3=*ABB%;4*Ge;YHuFjCb_V_ zo=X>KWH>nuqjx}no2S}8^;MX!iOs-^^d) zDlIHAdNbKxQ4?&?k1TRte8;2`6jx@FF>gFB=0XxZ-u8gcr??g3rq`}Ark|~jp_Dc! z?QG-zF=!fxX z9)x?}?$~9YiyRc&g3RXp}8=P-Ssj|M! z@z`ei)IQq0H0khM`Nu}5d)8w0Yl&&DZT}~{9hY`!(_c!)Dpkk53sK$>95SSsf5&83 z*X^sa)K+buK%@L1Ltl2GWr%GK-ycZc{6@-r6p{hzt%qUf@0_(qbw>KeOOE$cI}hV)yL>!RDMsyq;BEx^QLnIw*U(o~x^1S44mwbcpot zo7M>UpsL}B7r7qCdv$^8wgE-CzQtY6R7Dw-WF4o?V;W{ePz}}LDQBMN@IhI+#hU|5 zBgbQwcFX*>EG#eKonY#ijo}a&c3_FHavR?5QjB1_I-Yw@pzrBa)#Y750+tDIcc&L_IRYkDBnzUd0rb7&~zc&_DmMr__= zLO~^hW2Lor?iO0D$E@vjy}K|)$ma4T2e#YKMoJ_N6@TqDl*{y#L%OTJsCOYnN7qA+ z2{3Q!hkOQhTeVayN$jj~ONE89Jvea+UQ5;hW9*S|w87hF!@%j&t%t<{rw=F9iho>+ z@pL_eYkS@fks!^i!%0PqviPb|Z72_xR9atGwQl&5@`+>_JSSNrgr0D(Gp+cecuBTNfiw4Ua=udY=EE)~++EsjgY8NK?83p%)dfpi%?`qy+&* zMGzI~MI=Zw6ahmo35YZc2pBLZAkqYB2@-@LAOQ)69zqLX=snW8C#d)Hz2CjRzMDT; zYiFPAnK`rPnP=v#vzK@kQyQkW-z+Q5GNs{@na74MJv_2)VlOIBcwvZS{Rqmv@z}3|sN2l z6W4W_qz_AWdh)jtU47SuXZL?A4T-RPtIwnoE2MtA1-|a3xV@UBxN?2WsiezXuZKlEBqb#? zJy^j%*?9JG4%IfDG=(d8AL8cb4m1Q|oDyC~e~F$o?r)(b$x72_u0;cf@3N*o5Drp> z(?<66dIE!zsTuUC=e&KV*`NPo8jJ9+GHJ<}F7Q#*1frd|_OpmoYlcaMQ9PaIL4p8@ zv=1uZ2CMbNeR348j@XsIj&K3Y$MmP>V9VV&AqA|no(*6|$X~a!I3$#A1*!!O(03n? 
zXQHUU9`?BhyW&6l|1s_#E@}}ORU!mYz3~L1xt)0wun+fDQEKNeTbz+h~=)0#uAL-~j(TY6kXg9MJk@bjkB<AZ?>EF7d4ZoBbwfm0-E?kPyk%yL|4oTnW+);J>mru7Z z&bghK(FayqE(2^>tGum}9x!>aG1tESbOl1`pM2W6zwOJJJILtfDJ#NC|L3?DvQN>b+Y?W^Rq>>kxKw>snB`Gt_ASM|kO$(dcyK zvQWBqo){cevr7^QcA7wPw2-?1I-yq2$;c!!FrXISq2+~67}yiu9w1FlT!3cH;KEJ* zqM?tcf@9*mdd(c~)Nno8-}b}<8h{LhB^mgTeHktI&Bxas&s2DJ2GMV`0^k~13))?m zC(TPf3yQzcsgUB6#^^&8NHF`}$C9O>hO^%T1?6nGG5m~K!<&W-qSH1G$A4<;l<{S# z8QEL7vUN$l80k`DFptr3#V6?Gp?2iH&0YK4*Mn49=@EzbxcbGABvvEruZ}*_-i1UfLhB)P)i0paqwkPk>a>wMHpq z`=mhjSnv$Z;s^9EssHq0?_~*NaM0BWP+ttgT|EC9LgKU2h_`P(3g&$m1O~qWofB;D zHh+9}ih1u1f-;?(q)Fxt*skiu%>*AWjZ@^W#g~x(&?}A&IV9PT=Y4Qv4xZ$?Q}o{N z040xNCVSe#>?Qow;EyUJpDnTT{ z^MI^Jy$>jVN-TW1@0tagD}W_Fwy2ft{=4ZY%Ncg;M}oUk;CEC>o9))nsYTITFA70fuC=E{CfL zH3cn)F=}xtgVAv;=QN|xGKbhz8ALO_>baTp(9#j*VH9y1FqB|qRV4Y#`!3X)^ z=&9Dan+1D;lu-V{brozsFCh4iN*p&1!|C_kEK~)LbR?={7i8Q5LgQ^(z_Vr1dsXV? zg)?+-QoAA~@@S0fYi1I{#lN}+;pM{$1By2@=j%w(Kpi}6 zQQzULBCkrUs{M(yq?u$7x-(~{0EYswh8d}Il&_575z;EcIU1D(EoitqQnanjt?@wI zAz7fNL%Gt%lDae#Se8RFCDFql)dj%2r>_^l4|F6U*}%2!pX%wqeq;qMZ#ASt*DfbLo6$s8>00H=Dc`^+ zwcWHH*lUnKS&wVr$)}|c1X|o)P?)ur0b3F_-+x@Ie>O7*k^jL-7ob&&jjYfVJat@s zt3L+jgy#}dN866PfU+|Lt{8Z7L!LL+B#o?BhZpA!GpuK1XiY16gb!j|g{wQ+$yF9j z9LX6@?>~KVn8)Gep2Tjk>QQ9aw&vzvyNh+5Mh*mC&#!Tega`_TFRU0vJx}>z;U0Vv zz0`<@9ix?}k=ha$zRJPfflTZskcmZgFS1#7O|}lO7;;Qf| z!q#MqQt_7kbe)a>P@IR{0+I{~{Nilgd?C|g?AzZQ@NTd29DTT~Ze0q>a)gtIrW2}uPP{sX11gf~s zXky;IwM)s5W{ANTBHux4TTY5|ZwO^*=Ltn%D-O0wf1D;|e+bGMix6qRC3dS|Ze^dc zRh}$m3Yb)vgyr<`POYcK-0I7AQ!R=d)=Li*RbG)!r0oz_#GuPqJ{vX$>O{V_-b=?F z(~gXz>AFrMKsggUyVkr4ShHzW-09d#WUFfjYG?cS z^p(D_m!tCi0Zv*|SQf9fMVb8AXnpUmRsn0?La?_r#}bFh{h5$1p(KcLrY^-{p zg-d;4@Eydf0@f&!btG)-ks@#Yt+F6}>jBo)i3;QXW-B>>X&>)na{&3nM1@3v18D=O zG*(h*YB!U*dgXx0EOz^-2~}}f^P*3XyuH$ok?9+9n}aQl^1L$+*7_Z#x6^2FI@SmMrj`p^Xqbk=KaXebt|ZO_`yxc#XiV zcq`Dhe|%QI@FP(XYnPLQ4}JD*`KY<>q>bNbueNnIPP;c zXA_f5R?TT7wGBR|va+6OGfwx(HoOO?yHNUw)=SM#9}B@dBaN(_M}nqi<|a3b;qDY_ zR8NqJ(B=BAR+Y~(!oZ>WMiP6j3~2uM^Q93|K7oSus)RP5N)fOFq? 
zS9&VA1AttfENpQXfznfL@3MlpRAeJ5hIzZfnQ4azG&k+c0|xAYMoRN2_@}8^_uv|i zPq*=?cu=Epf9GXi4CT}qAa|<%0Ww2(i^TWkRKnp=-s~gw#iIQk8Mm7qmwht8PfW!^ z9U)?~y3p540fUb=s#JsXQ5!3frwp_1TyBwSd|%pnpO1|h7bi&j|Xg@ zcK5{l@;^oX*k~XPj;-wmY_I(Q9bLFq03J+M8jTm?_~tUmqm!l>fzOh=c<2X<0uE4EJrt~M0ex${u!9`#7*ba_6+WU$4--n z@^N-em1hDM!41dZg&)Y3-yJ^}U{%NtGuE2Vt z(2Jf?LQbF6Zw;h_5?0^UM6)5r{4ZvFo-$s*eK~+|9`yU=ny|X{jQK_=0Gbbhu&-^6 zKLo9O!wHXXn-2`TpppQygB4Pgk)mhb``V3FLQ^M5a=l9kdh3?n@e`0DryMjqt*ys~ zdt;vW^k4#7gDplH%A7Wy^YxYHAI$wfpm>+i{)O)0_iA|z-V)rlcA08a_Ac!HuZh54 znZeb(ixMd+eT3*|xiOA-XIivORcN-tmnfP;8a-2OzKxiz>)_iox>MEqX1@1l!^{p5 z#8LJfpX6^2p#%@lXV`gCZ3ZOoM|*9tq(WS$<&zCh=fMcZYzKACVc!hdwLeB4$kZ(Vq$=)_JKtB$};tfC*h0&67Z?Wx4rCLx=dU#!D-fO8ko@r3u8J|1ljnUI zC;-Y@rPLs)x+0^x&JJm|m{@*fl#)vbERqzIKyjN@YaG&7TetIjz$M|8yQTP> Date: Mon, 26 Jul 2021 03:53:48 +0000 Subject: [PATCH 49/63] debug --- docs/en_US/Compression/Pruner.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index bae2a38b3a..052398fe00 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -785,7 +785,7 @@ Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code) } config_list = [{ 'sparsity': 0.5, - 'op_types': ["Linear"] + 'op_types': ["Linear"] }] pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() From 61679f1fb609b6de857f019d90921ad32391d87e Mon Sep 17 00:00:00 2001 From: Di Wu Date: Mon, 26 Jul 2021 08:22:20 +0000 Subject: [PATCH 50/63] example fix --- .../pruning/transformers/transformer_pruning.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 68f9bbecc1..6f04e13eef 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -1,3 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + import argparse import logging import math @@ -183,9 +186,9 @@ def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_ Otherwise, finetune the model for 1 epoch. This is called by the pruner during pruning iterations. 
""" if epoch_num == 0: - print("Running forward and backward on the entire dataset without updating parameters...") + logger.info("Running forward and backward on the entire dataset without updating parameters...") else: - print("Finetuning for 1 epoch...") + logger.info("Finetuning for 1 epoch...") progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) train_epoch = args.num_train_epochs if epoch_num is None else 1 From c0b93ed23fa4c8cbd373ddbb5dc9ae60ba437452 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Mon, 26 Jul 2021 08:42:43 +0000 Subject: [PATCH 51/63] handle empty groups caused by config --- .../pytorch/pruning/transformer_pruner.py | 9 ++++-- .../transformer_pruning_head_masker.py | 32 +++++++++++-------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index c6b1a9b35a..283e0ba0e7 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -172,6 +172,9 @@ def validate_weight_groups(self): errmsg = 'Attention weight group sanity check not passed' sparsity = None for group in self.masking_groups: + # allow empty groups - may be caused by config list filtering + if len(group) == 0: + continue assert len(group) == 4, errmsg + ': each group must have four weights' assert group[0].module.weight.size() == group[1].module.weight.size() and \ group[1].module.weight.size() == group[2].module.weight.size(), \ @@ -306,8 +309,10 @@ def _calc_mask_global(self): overall_sparsity = self.get_modules_wrapper()[0].config['sparsity'] / self.num_iterations n_heads_total = 0 - for q_proj, _, _, _ in self.masking_groups: - n_heads_total += int(q_proj.module.weight.size()[0] / self.head_hidden_dim) + for group in self.masking_groups: + if len(group) != 0: + q_proj, _, _, _ = group + n_heads_total += int(q_proj.module.weight.size()[0] / self.head_hidden_dim) n_heads_to_prune = int(n_heads_total * overall_sparsity) return self.masker.calc_mask_global(n_heads_to_prune) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py index cf6db493b7..fc7f6f5808 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruning_head_masker.py @@ -61,12 +61,14 @@ def calc_mask(self, sparsity, wrapper=None, wrapper_idx=None, weight_group=None, 'bias_mask': bias mask tensor (optional) """ assert weight_group is not None - num_total = weight_group[0].module.weight.data.size(0) // self.head_hidden_dim - if num_total < 2: + if len(weight_group) == 0: return None - num_prune = max(int(num_total * sparsity), 1) - - return self.get_mask(num_prune, weight_group, **kwargs) + else: + num_total = weight_group[0].module.weight.data.size(0) // self.head_hidden_dim + if num_total < 2: + return None + num_prune = max(int(num_total * sparsity), 1) + return self.get_mask(num_prune, weight_group, **kwargs) def calc_mask_global(self, n_heads_to_prune): """ @@ -84,10 +86,11 @@ def calc_mask_global(self, n_heads_to_prune): # calculate scores as normal (this step does not require global information) head_importance_scores = [] for group_idx, group in enumerate(self.pruner.masking_groups): - scores = self.get_head_importance_scores(group) - n_heads = group[0].module.weight.size(0) // 
self.head_hidden_dim - for head_idx in range(n_heads): - head_importance_scores.append([group_idx, head_idx, scores[head_idx]]) + if len(group) != 0: + scores = self.get_head_importance_scores(group) + n_heads = group[0].module.weight.size(0) // self.head_hidden_dim + for head_idx in range(n_heads): + head_importance_scores.append([group_idx, head_idx, scores[head_idx]]) # determine which head to prune for each layer n_selected = 0 @@ -103,10 +106,13 @@ def calc_mask_global(self, n_heads_to_prune): # generate masks all_masks = [] for group_idx, group in enumerate(self.pruner.masking_groups): - n_heads = group[0].module.weight.size(0) // self.head_hidden_dim - device = group[0].module.weight.device - head_level_mask = torch.tensor([i not in self.pruner.pruned_heads[group_idx] for i in range(n_heads)], device=device) # pylint: disable=not-callable - masks = self._get_layer_masks_from_head_mask(group, head_level_mask) + if len(group) == 0: + masks = None + else: + n_heads = group[0].module.weight.size(0) // self.head_hidden_dim + device = group[0].module.weight.device + head_level_mask = torch.tensor([i not in self.pruner.pruned_heads[group_idx] for i in range(n_heads)], device=device) # pylint: disable=not-callable + masks = self._get_layer_masks_from_head_mask(group, head_level_mask) all_masks.append(masks) return all_masks From 856ee2aa18b8856c98979e573e86dc429c39d2f0 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Mon, 26 Jul 2021 09:36:06 +0000 Subject: [PATCH 52/63] sanity check --- .../compression/pytorch/pruning/transformer_pruner.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 283e0ba0e7..ebb05ed60a 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -189,9 +189,8 @@ def validate_weight_groups(self): if self.global_sort: assert sparsity == group[0].config['sparsity'], \ errmsg + ': for global_sort=True, the sparsity for all modules must be the same' - t = group[0].module.weight.size(0) / self.head_hidden_dim - assert t % 1 == 0, errmsg + ': head_hidden_dim must be a divisor of the output dimension of the ' \ - 'projection weights' + assert group[0].module.weight.size(0) % self.head_hidden_dim == 0, \ + errmsg + ': head_hidden_dim must be a divisor of the output dimension of the projection weights' def remove_ungrouped_modules(self): """ From 32bf76a121caa35f148189b41695969b3b3b786a Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 05:23:44 +0000 Subject: [PATCH 53/63] head indexing --- docs/en_US/Compression/Pruner.rst | 2 +- .../model_compress/pruning/transformers/transformer_pruning.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 052398fe00..13ff794ffa 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -841,7 +841,7 @@ Usage 3: one-shot pruning, setting different sparsity for different layers (PyTo { 'sparsity': 0.25, 'op_types': ["Linear"], - 'op_names': [x for layer in attention_name_groups[:6] for x in layer] # last six layers + 'op_names': [x for layer in attention_name_groups[6:] for x in layer] # last six layers } ] pruner = TransformerHeadPruner(model, config_list, **kwargs) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py 
b/examples/model_compress/pruning/transformers/transformer_pruning.py index 6f04e13eef..27e1936f95 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -347,7 +347,7 @@ def trainer(model, optimizer, criterion, epoch): { "sparsity": args.sparsity / 2, "op_types": ["Linear"], - "op_names": [x for layer in attention_name_groups[:6] for x in layer] + "op_names": [x for layer in attention_name_groups[6:] for x in layer] } ] From ed11f42c858677d6b498d65e884af94a8499b080 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 05:26:20 +0000 Subject: [PATCH 54/63] default args --- .../compression/pytorch/pruning/transformer_pruner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index ebb05ed60a..06749e0e42 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -74,7 +74,7 @@ class TransformerHeadPruner(Pruner): For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. """ def __init__(self, model, config_list,head_hidden_dim, attention_name_groups=None, dummy_input=None, - ranking_criterion='taylorfo', global_sort=False, num_iterations=1, epochs_per_iteration=1, + ranking_criterion='l1_weight', global_sort=False, num_iterations=1, epochs_per_iteration=1, optimizer=None, trainer=None, criterion=None, **algo_kwargs): super().__init__(model, config_list) From 50d9cb93eea6cb6079d82e81fbdf14c00b5d142b Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 05:44:41 +0000 Subject: [PATCH 55/63] improve docs --- docs/en_US/Compression/Pruner.rst | 61 +++++++++---------------------- 1 file changed, 18 insertions(+), 43 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 13ff794ffa..0ef779ec93 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -769,28 +769,7 @@ Suppose we want to prune a BERT with Huggingface implementation, which has the f :target: ../../img/huggingface_bert_architecture.png :alt: -Usage 1: one-shot pruning, same sparsity for all the layers (PyTorch code) - -.. code-block:: python - - from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner - kwargs = {'ranking_criterion': "l1_weight", - 'global_sort': False, - 'num_iterations': 1, - 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 - 'head_hidden_dim': 64, - 'dummy_input': dummy_input, - 'trainer': trainer, - 'optimizer': optimizer - } - config_list = [{ - 'sparsity': 0.5, - 'op_types': ["Linear"] - }] - pruner = TransformerHeadPruner(model, config_list, **kwargs) - pruner.compress() - -Usage 2: same effect as usage 1, the only change is passing names to the pruner instead of dummy input (PyTorch code) +Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code). Note that here we specify `op_names` in the config list to assign different sparsity to different layers. Meanwhile, we pass `attention_name_groups` to the pruner so that the pruner may group together the weights belonging to the same attention layer. Alternatively, you can pass a `dummy_input` parameter and omit the `attention_name_groups`, and the pruner will attempt to group the layers together (see usage 2). .. 
code-block:: python @@ -804,50 +783,46 @@ Usage 2: same effect as usage 1, the only change is passing names to the pruner 'num_iterations': 1, 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 'head_hidden_dim': 64, - 'attention_name_groups': attention_name_groups, + 'attention_name_groups': attention_name_groups, # can change to dummy_input here 'trainer': trainer, 'optimizer': optimizer } config_list = [{ 'sparsity': 0.5, - 'op_types': ["Linear"] - }] + 'op_types': ["Linear"], + 'op_names': [x for layer in attention_name_groups[:6] for x in layer] # first six layers + }, + { + 'sparsity': 0.25, + 'op_types': ["Linear"], + 'op_names': [x for layer in attention_name_groups[6:] for x in layer] # last six layers + } + ] pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() -Usage 3: one-shot pruning, setting different sparsity for different layers (PyTorch code) +Usage 2: one-shot pruning, same sparsity for all the layers (PyTorch code). Here we replace the `attention_name_groups` parameter with `dummy_input` (for our current implementation, either parameter will work). Since in this example we prune all the attention layers with the same sparsity, the config list can be simplied and specified without `op_names`. Note that although other `Linear` layers such as those in feed-forward layers will be matched by this config, they will not be pruned since this pruner only prunes attention heads. .. code-block:: python from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner - attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) kwargs = {'ranking_criterion': "l1_weight", 'global_sort': False, 'num_iterations': 1, - 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 + 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 'head_hidden_dim': 64, - 'attention_name_groups': attention_name_groups, # can change to dummy_input here + 'dummy_input': dummy_input, 'trainer': trainer, 'optimizer': optimizer } config_list = [{ 'sparsity': 0.5, - 'op_types': ["Linear"], - 'op_names': [x for layer in attention_name_groups[:6] for x in layer] # first six layers - }, - { - 'sparsity': 0.25, - 'op_types': ["Linear"], - 'op_names': [x for layer in attention_name_groups[6:] for x in layer] # last six layers - } - ] + 'op_types': ["Linear"] + }] pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() - - + + User configuration for Transformer Head Pruner ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From a6727d50cf19913f8d7a30c9c8c9bf0c014d7d44 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 05:47:50 +0000 Subject: [PATCH 56/63] debug --- docs/en_US/Compression/Pruner.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index 0ef779ec93..a36443e1dc 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -735,7 +735,7 @@ Transformer Head Pruner is a tool designed for pruning attention heads from the Typically, each attention layer in the Transformer models consists of four weights: three projection matrices for query, key, value, and an output projection matrix. 
The outputs of the former three matrices contains the projected results for all heads. Normally, the results are then reshaped so that each head performs that attention computation independently. The final results are concatenated back before fed into the output projection. Therefore, when an attention head is pruned, the same weights corresponding to that heads in the three projection matrices are pruned. Also, the weights in the output projection corresponding to the head's output are pruned. In our implementation, we calculate and apply masks to the four matrices together. -Note: currently, the pruner can only handle models with projection weights written as separate `Linear` modules, i.e., it expects four `Linear` modules corresponding to query, key, value, and an output projections. Therefore, in the `config_list`, you should either write `['Linear']` for the `op_types` field, or write names corresponding to `Linear` modules for the `op_names` field. +Note: currently, the pruner can only handle models with projection weights written as separate ``Linear`` modules, i.e., it expects four ``Linear`` modules corresponding to query, key, value, and an output projections. Therefore, in the ``config_list``, you should either write ``['Linear']`` for the ``op_types`` field, or write names corresponding to ``Linear`` modules for the ``op_names`` field. The pruner implements the following algorithm: @@ -763,13 +763,13 @@ In addition to the following usage guide, we provide a more detailed example of Usage ^^^^^ -Suppose we want to prune a BERT with Huggingface implementation, which has the following architecture (obtained by calling `print(model)`). Note that we only show the first layer of the repeated layers in the encoder's `ModuleList layer`. +Suppose we want to prune a BERT with Huggingface implementation, which has the following architecture (obtained by calling ``print(model)``). Note that we only show the first layer of the repeated layers in the encoder's ``ModuleList layer``. .. image:: ../../img/huggingface_bert_architecture.png :target: ../../img/huggingface_bert_architecture.png :alt: -Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code). Note that here we specify `op_names` in the config list to assign different sparsity to different layers. Meanwhile, we pass `attention_name_groups` to the pruner so that the pruner may group together the weights belonging to the same attention layer. Alternatively, you can pass a `dummy_input` parameter and omit the `attention_name_groups`, and the pruner will attempt to group the layers together (see usage 2). +Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code). Note that here we specify ``op_names`` in the config list to assign different sparsity to different layers. Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. Alternatively, you can pass a ``dummy_input`` parameter and omit the ``attention_name_groups``, and the pruner will attempt to group the layers together (see usage 2). .. code-block:: python @@ -801,7 +801,7 @@ Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sp pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() -Usage 2: one-shot pruning, same sparsity for all the layers (PyTorch code). 
Here we replace the `attention_name_groups` parameter with `dummy_input` (for our current implementation, either parameter will work). Since in this example we prune all the attention layers with the same sparsity, the config list can be simplied and specified without `op_names`. Note that although other `Linear` layers such as those in feed-forward layers will be matched by this config, they will not be pruned since this pruner only prunes attention heads.
+Usage 2: one-shot pruning, same sparsity for all the layers (PyTorch code). Here we replace the ``attention_name_groups`` parameter with ``dummy_input`` (for our current implementation, either parameter will work). Since in this example we prune all the attention layers with the same sparsity, the config list can be simplied and specified without ``op_names``. Note that although other ``Linear`` layers such as those in feed-forward layers will be matched by this config, they will not be pruned since this pruner only prunes attention heads.
 
 .. code-block:: python
 

From 941199886da87dff7326aeeb891850e89412ed03 Mon Sep 17 00:00:00 2001
From: Di Wu
Date: Tue, 27 Jul 2021 05:49:42 +0000
Subject: [PATCH 57/63] debug

---
 docs/en_US/Compression/Pruner.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst
index a36443e1dc..3678cbae58 100644
--- a/docs/en_US/Compression/Pruner.rst
+++ b/docs/en_US/Compression/Pruner.rst
@@ -769,7 +769,7 @@ Suppose we want to prune a BERT with Huggingface implementation, which has the f
 :target: ../../img/huggingface_bert_architecture.png
 :alt:
 
-Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code). Note that here we specify ``op_names`` in the config list to assign different sparsity to different layers. Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. Alternatively, you can pass a ``dummy_input`` parameter and omit the ``attention_name_groups``, and the pruner will attempt to group the layers together (see usage 2).
+**Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code)**. Note that here we specify ``op_names`` in the config list to assign different sparsity to different layers. Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. Alternatively, you can pass a ``dummy_input`` parameter and omit the ``attention_name_groups``, and the pruner will attempt to group the layers together (see usage 2).
 
@@ -801,7 +801,7 @@ Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sp
-Usage 2: one-shot pruning, same sparsity for all the layers (PyTorch code). Here we replace the ``attention_name_groups`` parameter with ``dummy_input`` (for our current implementation, either parameter will work). Since in this example we prune all the attention layers with the same sparsity, the config list can be simplied and specified without ``op_names``. Note that although other ``Linear`` layers such as those in feed-forward layers will be matched by this config, they will not be pruned since this pruner only prunes attention heads.
+**Usage 2: one-shot pruning, same sparsity for all the layers (PyTorch code)**. Here we replace the ``attention_name_groups`` parameter with ``dummy_input`` (for our current implementation, either parameter will work). Since in this example we prune all the attention layers with the same sparsity, the config list can be simplied and specified without ``op_names``. Note that although other ``Linear`` layers such as those in feed-forward layers will be matched by this config, they will not be pruned since this pruner only prunes attention heads. .. code-block:: python From ae31c181319d2bfc01573fe067244f2e3827a707 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 05:58:08 +0000 Subject: [PATCH 58/63] epoch parameter to trainer --- .../pruning/transformers/transformer_pruning.py | 10 +++++----- .../compression/pytorch/pruning/transformer_pruner.py | 8 +++++--- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 27e1936f95..75ea24d7b9 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -180,12 +180,12 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=None): """ This function is used for to create a "trainer" that is passed to the pruner. - If epoch_num is 0, the function just runs forward and backward without updating the parameters. - This allows the pruner to collect data without parameter update (for activation or gradient based - pruning methods). + If epoch_num is 0 or None, the function just runs forward and backward without updating the + parameters. This allows the pruner to collect data without parameter update (for activation or + gradient based pruning methods). Otherwise, finetune the model for 1 epoch. This is called by the pruner during pruning iterations. """ - if epoch_num == 0: + if not epoch_num or epoch_num == 0: logger.info("Running forward and backward on the entire dataset without updating parameters...") else: logger.info("Finetuning for 1 epoch...") @@ -198,7 +198,7 @@ def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_ batch[field] = batch[field].to(device) outputs = model(**batch) outputs.loss.backward() - if epoch_num != 0: + if epoch_num and epoch_num != 0: optimizer.step() optimizer.zero_grad() progress_bar.update(1) diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index 06749e0e42..fe57936dfa 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -67,8 +67,10 @@ class TransformerHeadPruner(Pruner): Optimizer used to train model trainer: function Function used to train the model. - Users should write this function as a normal function to train the Pytorch model - and include `model, optimizer, criterion, epoch` as function arguments. + Users should write this function as a normal function to train the Pytorch model and include + `model, optimizer, criterion, epoch` as function arguments. Note that the trainer may be used for collecting + data for pruning. In that case, ``epoch=None`` will be passed. 
If you do not want to perform parameter update, + please avoid ``optimizer.step()`` when ``epoch == None``. criterion: function Function used to calculate the loss between the target and the output. For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. @@ -230,7 +232,7 @@ def compress(self): if self.ranking_criterion in ['l1_activation', 'l2_activation', 'taylorfo']: training = self.bound_model.training self.bound_model.eval() - self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=0) + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=None) self.update_mask() self.bound_model.train(training) else: From 266ec1ce23a44bf4d69c5bc5958f0ace141b2c2e Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 07:21:04 +0000 Subject: [PATCH 59/63] update example --- .../pruning/transformers/transformer_pruning.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 75ea24d7b9..81d3a5e21b 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -180,25 +180,25 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=None): """ This function is used for to create a "trainer" that is passed to the pruner. - If epoch_num is 0 or None, the function just runs forward and backward without updating the + If epoch_num is None, the function just runs forward and backward without updating the parameters. This allows the pruner to collect data without parameter update (for activation or gradient based pruning methods). Otherwise, finetune the model for 1 epoch. This is called by the pruner during pruning iterations. 
""" - if not epoch_num or epoch_num == 0: + if epoch_num is None: logger.info("Running forward and backward on the entire dataset without updating parameters...") else: logger.info("Finetuning for 1 epoch...") progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) - train_epoch = args.num_train_epochs if epoch_num is None else 1 + train_epoch = 1 for epoch in range(train_epoch): for step, batch in enumerate(train_dataloader): for field in batch.keys(): batch[field] = batch[field].to(device) outputs = model(**batch) outputs.loss.backward() - if epoch_num and epoch_num != 0: + if epoch_num is not None: optimizer.step() optimizer.zero_grad() progress_bar.update(1) From a512ef3d14a12f7ab7ce43af32c113ff5ce38589 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 09:39:46 +0000 Subject: [PATCH 60/63] forward_runner API v1 --- .../pruning/transformers/run.sh | 3 +- .../transformers/transformer_pruning.py | 65 ++++++++++++------- .../pytorch/pruning/transformer_pruner.py | 35 +++++++--- test/ut/sdk/test_transformer_pruners.py | 21 ++++-- 4 files changed, 84 insertions(+), 40 deletions(-) diff --git a/examples/model_compress/pruning/transformers/run.sh b/examples/model_compress/pruning/transformers/run.sh index 8599edc376..88a8ff8691 100755 --- a/examples/model_compress/pruning/transformers/run.sh +++ b/examples/model_compress/pruning/transformers/run.sh @@ -8,7 +8,7 @@ PRETRAINED_MODEL="bert-base-uncased" # "distilbert-base-uncased", "robe # parameters for pruning # change USAGE to different numbers (1, 2, 3) to run examples with different configs -USAGE=2 +USAGE=3 SPARSITY=0.5 RANKING_CRITERION=l1_weight # "l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo" NUM_ITERATIONS=1 # 1 for one-shot pruning @@ -27,6 +27,7 @@ TASK_LIST=("cola" "sst2" "mrpc" "stsb" "qqp" "mnli" "qnli" "rte" "wnli") if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then mkdir $OUTDIR python transformer_pruning.py \ + --usage $USAGE \ --sparsity $SPARSITY \ --ranking_criterion $RANKING_CRITERION \ --num_iterations $NUM_ITERATIONS \ diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index 81d3a5e21b..b25e118078 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -177,18 +177,13 @@ def train_model(args, model, is_regression, train_dataloader, eval_dataloader, o logger.info(f"epoch {epoch}: {eval_metric}") -def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=None): +def trainer_helper(model, train_dataloader, optimizer, device): """ This function is used for to create a "trainer" that is passed to the pruner. - If epoch_num is None, the function just runs forward and backward without updating the - parameters. This allows the pruner to collect data without parameter update (for activation or - gradient based pruning methods). - Otherwise, finetune the model for 1 epoch. This is called by the pruner during pruning iterations. + Finetune the model for 1 epoch. This function is called by the pruner during pruning iterations (or called to + calculate scores for pruning when ranking criterion is "taylorfo"). 
""" - if epoch_num is None: - logger.info("Running forward and backward on the entire dataset without updating parameters...") - else: - logger.info("Finetuning for 1 epoch...") + logger.info("Training for 1 epoch...") progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) train_epoch = 1 @@ -198,10 +193,28 @@ def dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_ batch[field] = batch[field].to(device) outputs = model(**batch) outputs.loss.backward() - if epoch_num is not None: - optimizer.step() + optimizer.step() optimizer.zero_grad() progress_bar.update(1) + + +def forward_runner_helper(model, train_dataloader, device): + """ + This function is used for to create a "forward_runner" that is passed to the pruner. + The function just runs forward on the train set without updating the parameters. + This allows the pruner to collect data for activation-based pruning methods. + """ + logger.info("Running forward on the entire train set without updating parameters...") + progress_bar = tqdm(range(len(train_dataloader)), position=0, leave=True) + + forward_epoch = 1 + for epoch in range(forward_epoch): + for step, batch in enumerate(train_dataloader): + for field in batch.keys(): + batch[field] = batch[field].to(device) + _ = model(**batch) + # note: no loss.backward or optimizer.step() is performed here + progress_bar.update(1) def final_eval_for_mnli(args, model, processed_datasets, metric, data_collator): @@ -280,7 +293,10 @@ def main(): # Here criterion is embedded in the model. Upper levels can just pass None to trainer. def trainer(model, optimizer, criterion, epoch): - return dry_run_or_finetune(args, model, train_dataloader, optimizer, device, epoch_num=epoch) + return trainer_helper(model, train_dataloader, optimizer, device) + + def forward_runner(model): + return forward_runner_helper(model, train_dataloader, device) # We provide three usage scenarios. # Set the "usage" parameter in the command line argument to run each one of them. 
@@ -293,7 +309,8 @@ def trainer(model, optimizer, criterion, epoch): "head_hidden_dim": 64, "dummy_input": dummy_input, "trainer": trainer, - "optimizer": optimizer} + "optimizer": optimizer, + "forward_runner": forward_runner} config_list = [{ "sparsity": args.sparsity, @@ -302,10 +319,10 @@ def trainer(model, optimizer, criterion, epoch): # example 2: prune different layers with uniform sparsity, but specify names group instead of dummy_input elif args.usage == 2: - attention_name_groups = list(zip(["encoder.layer.{}.attention.self.query".format(i) for i in range(12)], - ["encoder.layer.{}.attention.self.key".format(i) for i in range(12)], - ["encoder.layer.{}.attention.self.value".format(i) for i in range(12)], - ["encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + attention_name_groups = list(zip(["bert.encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) kwargs = {"ranking_criterion": args.ranking_criterion, "global_sort": args.global_sort, @@ -314,7 +331,8 @@ def trainer(model, optimizer, criterion, epoch): "attention_name_groups": attention_name_groups, "head_hidden_dim": 64, "trainer": trainer, - "optimizer": optimizer} + "optimizer": optimizer, + "forward_runner": forward_runner} config_list = [{ "sparsity": args.sparsity, @@ -325,10 +343,10 @@ def trainer(model, optimizer, criterion, epoch): # example 3: prune different layers with different sparsity elif args.usage == 3: - attention_name_groups = list(zip(["encoder.layer.{}.attention.self.query".format(i) for i in range(12)], - ["encoder.layer.{}.attention.self.key".format(i) for i in range(12)], - ["encoder.layer.{}.attention.self.value".format(i) for i in range(12)], - ["encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + attention_name_groups = list(zip(["bert.encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) kwargs = {"ranking_criterion": args.ranking_criterion, "global_sort": args.global_sort, @@ -337,7 +355,8 @@ def trainer(model, optimizer, criterion, epoch): "attention_name_groups": attention_name_groups, "head_hidden_dim": 64, "trainer": trainer, - "optimizer": optimizer} + "optimizer": optimizer, + "forward_runner": forward_runner} config_list = [{ "sparsity": args.sparsity, diff --git a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py index fe57936dfa..5f5d9b5dfd 100644 --- a/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py +++ b/nni/algorithms/compression/pytorch/pruning/transformer_pruner.py @@ -61,23 +61,32 @@ class TransformerHeadPruner(Pruner): Number of pruning iterations. Defaults to 1 (ont-shot pruning). If num_iterations > 1, the pruner will split the sparsity specified in config_list uniformly and assign a fraction to each pruning iteration. epochs_per_iteration : int - Number of finetuning epochs before the next pruning iteration. This only has effect when num_iterations > 1. + Number of finetuning epochs before the next pruning iteration. 
+ Only used when num_iterations > 1. If num_iterations is 1, then no finetuning is performed by the pruner after pruning. optimizer: torch.optim.Optimizer Optimizer used to train model trainer: function - Function used to train the model. - Users should write this function as a normal function to train the Pytorch model and include - `model, optimizer, criterion, epoch` as function arguments. Note that the trainer may be used for collecting - data for pruning. In that case, ``epoch=None`` will be passed. If you do not want to perform parameter update, - please avoid ``optimizer.step()`` when ``epoch == None``. + Function used to finetune the model between pruning iterations. + Only used when num_iterations > 1 or ranking_criterion is 'taylorfo'. + Users should write this function as a normal function to train the PyTorch model and include + `model, optimizer, criterion, epoch` as function arguments. Note that the trainer is also used for collecting + gradients for pruning if ranking_criterion is 'taylorfo'. In that case, ``epoch=None`` will be passed. criterion: function Function used to calculate the loss between the target and the output. + Only used when num_iterations > 1 or ranking_criterion is 'taylorfo'. For example, you can use ``torch.nn.CrossEntropyLoss()`` as input. + forward_runner: function + Function used to perform a "dry run" on the model on the entire train/validation dataset in order to collect + data for pruning required by the criteria 'l1_activation' or 'l2_activation'. + Only used when ranking_criterion is 'l1_activation' or 'l2_activation'. + Users should write this function as a normal function that accepts a PyTorch model and runs forward on the model + using the entire train/validation dataset. This function is not expected to perform any backpropagation or + parameter updates. 
""" def __init__(self, model, config_list,head_hidden_dim, attention_name_groups=None, dummy_input=None, ranking_criterion='l1_weight', global_sort=False, num_iterations=1, epochs_per_iteration=1, - optimizer=None, trainer=None, criterion=None, + optimizer=None, trainer=None, criterion=None, forward_runner=None, **algo_kwargs): super().__init__(model, config_list) @@ -94,9 +103,12 @@ def __init__(self, model, config_list,head_hidden_dim, attention_name_groups=Non self._optimizer = optimizer self._trainer = trainer self._criterion = criterion - if self.ranking_criterion in ['l1_activation', 'l2_activation', 'taylorfo'] or num_iterations > 1: + self._forward_runner = forward_runner + if self.ranking_criterion in ['taylorfo'] or num_iterations > 1: assert self._trainer is not None assert self._optimizer is not None + if self.ranking_criterion in ['l1_activation', 'l2_activation']: + assert self._forward_runner is not None # Group generation: one group per attention layer, four weights per group self.masking_groups = [] @@ -229,12 +241,15 @@ def validate_config(self, model, config_list): def compress(self): for pruning_iter in range(self.num_iterations): - if self.ranking_criterion in ['l1_activation', 'l2_activation', 'taylorfo']: + if self.ranking_criterion in ['l1_activation', 'l2_activation']: training = self.bound_model.training self.bound_model.eval() - self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=None) + self._forward_runner(self.bound_model) # dry run, forward only self.update_mask() self.bound_model.train(training) + elif self.ranking_criterion in ['taylorfo']: + self._trainer(self.bound_model, optimizer=self._optimizer, criterion=self._criterion, epoch=None) + self.update_mask() else: self.update_mask() diff --git a/test/ut/sdk/test_transformer_pruners.py b/test/ut/sdk/test_transformer_pruners.py index 3f10eff1e1..a875c49485 100644 --- a/test/ut/sdk/test_transformer_pruners.py +++ b/test/ut/sdk/test_transformer_pruners.py @@ -16,6 +16,7 @@ sys.path.append(os.path.dirname(__file__)) from models.pytorch_models.transformer import TransformerEncoder + def validate_sparsity(wrapper, sparsity, bias=False): masks = [wrapper.weight_mask] if bias and wrapper.bias_mask is not None: @@ -45,7 +46,6 @@ def forward(self, x, mask): def train(model, dataloader, criterion, optimizer): model.train() device = next(model.parameters()).device - for _ in range(2): y = torch.ones(10).to(device) out = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device)) @@ -55,6 +55,13 @@ def train(model, dataloader, criterion, optimizer): optimizer.step() +def dry_run(model): + device = next(model.parameters()).device + for _ in range(2): + y = torch.ones(10).to(device) + _ = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device)) + + def head_pruner_tests(criterion, global_sort, use_graph, iterative): print("Testing criterion {} with global_sort={} and use_graph={}".format(criterion, global_sort, use_graph)) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -94,17 +101,19 @@ def trainer(model, optimizer, criterion, epoch): kwargs['trainer'] = trainer kwargs['criterion'] = nn.BCELoss() + def forward_runner(model): + return dry_run(model) + kwargs['forward_runner'] = forward_runner + # create pruner and call compress() pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() # test model and mask export pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device) - - # TODO: test 
exporting to onnx when we can pass dummy_input instead of input_shape to export_model - # dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device)) - # pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', input_shape=None, - # dummy_input=dummy_input, device=device) + dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device)) + pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', input_shape=None, device=None, + dummy_input=dummy_input, opset_version=10) # validate sparsity if not global_sort: From 00ad0efe9f4b0b14109fbf14aa63cd820b2ac6a6 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 09:59:13 +0000 Subject: [PATCH 61/63] delete some usages --- docs/en_US/Compression/Pruner.rst | 64 +++++------- .../pruning/transformers/run.sh | 3 - .../transformers/transformer_pruning.py | 99 +++++-------------- 3 files changed, 47 insertions(+), 119 deletions(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index a0c2b120e1..a429c7ba83 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -744,7 +744,7 @@ The pruner implements the following algorithm: .. code-block:: bash Repeat for each pruning iteration (1 for one-shot pruning): - 1. Calculate importance scores for each head in each specified layer using a specific criterion + 1. Calculate importance scores for each head in each specified layer using a specific criterion. 2. Sort heads locally or globally, and prune out some heads with lowest scores. The number of pruned heads is determined according to the sparsity specified in the config. 3. If the specified pruning iteration is larger than 1 (iterative pruning), finetune the model for a while before the next pruning iteration. @@ -758,7 +758,7 @@ Currently, the following head sorting criteria are supported: We support local sorting (i.e., sorting heads within a layer) and global sorting (sorting all heads together), and you can control by setting the ``global_sort`` parameter. Note that if ``global_sort=True`` is passed, all weights must have the same sparsity in the config list. However, this does not mean that each layer will be prune to the same sparsity as specified. This sparsity value will be interpreted as a global sparsity, and each layer is likely to have different sparsity after pruning by global sort. -In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules as the pruner's initialization parameters (usage 1 below), or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (usage 2 below). However, if you would like to assign different sparsity to each layer, you can only use the first option, i.e., passing names of the weights to the pruner (usage 3 below). Also note that weights belonging to the same layer must have the same sparsity. +In our implementation, we support two ways to group the four weights in the same layer together. You can either pass a nested list containing the names of these modules as the pruner's initialization parameters (usage below), or simply pass a dummy input and the pruner will run ``torch.jit.trace`` to group the weights (experimental feature). However, if you would like to assign different sparsity to each layer, you can only use the first option, i.e., passing names of the weights to the pruner (see usage below). 
Also note that weights belonging to the same layer must have the same sparsity. In addition to the following usage guide, we provide a more detailed example of pruning BERT for tasks from the GLUE benchmark. Please find it in this :githublink:`page `. @@ -771,59 +771,39 @@ Suppose we want to prune a BERT with Huggingface implementation, which has the f :target: ../../img/huggingface_bert_architecture.png :alt: -**Usage 1: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code)**. Note that here we specify ``op_names`` in the config list to assign different sparsity to different layers. Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. Alternatively, you can pass a ``dummy_input`` parameter and omit the ``attention_name_groups``, and the pruner will attempt to group the layers together (see usage 2). +**Usage Example: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code)**. Note that here we specify ``op_names`` in the config list to assign different sparsity to different layers. Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. .. code-block:: python from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner - attention_name_groups = list(zip(['encoder.layer.{}.attention.self.query'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.key'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.self.value'.format(i) for i in range(12)], - ['encoder.layer.{}.attention.output.dense'.format(i) for i in range(12)])) - kwargs = {'ranking_criterion': "l1_weight", - 'global_sort': False, - 'num_iterations': 1, - 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 - 'head_hidden_dim': 64, - 'attention_name_groups': attention_name_groups, # can change to dummy_input here - 'trainer': trainer, - 'optimizer': optimizer + attention_name_groups = list(zip(["encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + kwargs = {"ranking_criterion": "l1_weight", + "global_sort": False, + "num_iterations": 1, + "epochs_per_iteration": 1, # this is ignored when num_iterations = 1 + "head_hidden_dim": 64, + "attention_name_groups": attention_name_groups, + "trainer": trainer, + "optimizer": optimizer, + "forward_runner": forward_runner } config_list = [{ - 'sparsity': 0.5, - 'op_types': ["Linear"], - 'op_names': [x for layer in attention_name_groups[:6] for x in layer] # first six layers + "sparsity": 0.5, + "op_types": ["Linear"], + "op_names": [x for layer in attention_name_groups[:6] for x in layer] # first six layers }, { - 'sparsity': 0.25, - 'op_types': ["Linear"], - 'op_names': [x for layer in attention_name_groups[6:] for x in layer] # last six layers + "sparsity": 0.25, + "op_types": ["Linear"], + "op_names": [x for layer in attention_name_groups[6:] for x in layer] # last six layers } ] pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() -**Usage 2: one-shot pruning, same sparsity for all the layers (PyTorch code)**. 
Here we replace the ``attention_name_groups`` parameter with ``dummy_input`` (for our current implementation, either parameter will work). Since in this example we prune all the attention layers with the same sparsity, the config list can be simplified and specified without ``op_names``. Note that although other ``Linear`` layers such as those in feed-forward layers will be matched by this config, they will not be pruned since this pruner only prunes attention heads. - -.. code-block:: python - - from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner - kwargs = {'ranking_criterion': "l1_weight", - 'global_sort': False, - 'num_iterations': 1, - 'epochs_per_iteration': 1, # this is ignored when num_iterations = 1 - 'head_hidden_dim': 64, - 'dummy_input': dummy_input, - 'trainer': trainer, - 'optimizer': optimizer - } - config_list = [{ - 'sparsity': 0.5, - 'op_types': ["Linear"] - }] - pruner = TransformerHeadPruner(model, config_list, **kwargs) - pruner.compress() - User configuration for Transformer Head Pruner ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/examples/model_compress/pruning/transformers/run.sh b/examples/model_compress/pruning/transformers/run.sh index 88a8ff8691..af00f1cf8e 100755 --- a/examples/model_compress/pruning/transformers/run.sh +++ b/examples/model_compress/pruning/transformers/run.sh @@ -7,8 +7,6 @@ TASK_NAME=$2 # "cola", "sst2", "mrpc", "stsb", PRETRAINED_MODEL="bert-base-uncased" # "distilbert-base-uncased", "roberta-base", "bert-base-cased", ... # parameters for pruning -# change USAGE to different numbers (1, 2, 3) to run examples with different configs -USAGE=3 SPARSITY=0.5 RANKING_CRITERION=l1_weight # "l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo" NUM_ITERATIONS=1 # 1 for one-shot pruning @@ -27,7 +25,6 @@ TASK_LIST=("cola" "sst2" "mrpc" "stsb" "qqp" "mnli" "qnli" "rte" "wnli") if [[ ${TASK_LIST[*]} =~ (^|[[:space:]])$TASK_NAME($|[[:space:]]) ]]; then mkdir $OUTDIR python transformer_pruning.py \ - --usage $USAGE \ --sparsity $SPARSITY \ --ranking_criterion $RANKING_CRITERION \ --num_iterations $NUM_ITERATIONS \ diff --git a/examples/model_compress/pruning/transformers/transformer_pruning.py b/examples/model_compress/pruning/transformers/transformer_pruning.py index b25e118078..c98e0ec744 100644 --- a/examples/model_compress/pruning/transformers/transformer_pruning.py +++ b/examples/model_compress/pruning/transformers/transformer_pruning.py @@ -47,8 +47,6 @@ def parse_args(): choices=["cola", "mnli", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the model and mask.") - parser.add_argument("--usage", type=int, default=1, - help="Select which pruning config example to run") parser.add_argument("--sparsity", type=float, required=True, help="Sparsity: proportion of heads to prune (should be between 0 and 1)") parser.add_argument("--global_sort", action="store_true", default=False, @@ -298,80 +296,33 @@ def trainer(model, optimizer, criterion, epoch): def forward_runner(model): return forward_runner_helper(model, train_dataloader, device) - # We provide three usage scenarios. - # Set the "usage" parameter in the command line argument to run each one of them.
- # example 1: prune all layers with uniform sparsity - if args.usage == 1: - kwargs = {"ranking_criterion": args.ranking_criterion, - "global_sort": args.global_sort, - "num_iterations": args.num_iterations, - "epochs_per_iteration": args.epochs_per_iteration, - "head_hidden_dim": 64, - "dummy_input": dummy_input, - "trainer": trainer, - "optimizer": optimizer, - "forward_runner": forward_runner} - - config_list = [{ - "sparsity": args.sparsity, + # example: prune different layers with different sparsity + attention_name_groups = list(zip(["bert.encoder.layer.{}.attention.self.query".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.key".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.self.value".format(i) for i in range(12)], + ["bert.encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) + + kwargs = {"ranking_criterion": args.ranking_criterion, + "global_sort": args.global_sort, + "num_iterations": args.num_iterations, + "epochs_per_iteration": args.epochs_per_iteration, + "attention_name_groups": attention_name_groups, + "head_hidden_dim": 64, + "trainer": trainer, + "optimizer": optimizer, + "forward_runner": forward_runner} + + config_list = [{ + "sparsity": args.sparsity, + "op_types": ["Linear"], + "op_names": [x for layer in attention_name_groups[:6] for x in layer] + }, + { + "sparsity": args.sparsity / 2, "op_types": ["Linear"], - }] - - # example 2: prune different layers with uniform sparsity, but specify names group instead of dummy_input - elif args.usage == 2: - attention_name_groups = list(zip(["bert.encoder.layer.{}.attention.self.query".format(i) for i in range(12)], - ["bert.encoder.layer.{}.attention.self.key".format(i) for i in range(12)], - ["bert.encoder.layer.{}.attention.self.value".format(i) for i in range(12)], - ["bert.encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) - - kwargs = {"ranking_criterion": args.ranking_criterion, - "global_sort": args.global_sort, - "num_iterations": args.num_iterations, - "epochs_per_iteration": args.epochs_per_iteration, - "attention_name_groups": attention_name_groups, - "head_hidden_dim": 64, - "trainer": trainer, - "optimizer": optimizer, - "forward_runner": forward_runner} - - config_list = [{ - "sparsity": args.sparsity, - "op_types": ["Linear"], - "op_names": [x for layer in attention_name_groups for x in layer] + "op_names": [x for layer in attention_name_groups[6:] for x in layer] } - ] - - # example 3: prune different layers with different sparsity - elif args.usage == 3: - attention_name_groups = list(zip(["bert.encoder.layer.{}.attention.self.query".format(i) for i in range(12)], - ["bert.encoder.layer.{}.attention.self.key".format(i) for i in range(12)], - ["bert.encoder.layer.{}.attention.self.value".format(i) for i in range(12)], - ["bert.encoder.layer.{}.attention.output.dense".format(i) for i in range(12)])) - - kwargs = {"ranking_criterion": args.ranking_criterion, - "global_sort": args.global_sort, - "num_iterations": args.num_iterations, - "epochs_per_iteration": args.epochs_per_iteration, - "attention_name_groups": attention_name_groups, - "head_hidden_dim": 64, - "trainer": trainer, - "optimizer": optimizer, - "forward_runner": forward_runner} - - config_list = [{ - "sparsity": args.sparsity, - "op_types": ["Linear"], - "op_names": [x for layer in attention_name_groups[:6] for x in layer] - }, - { - "sparsity": args.sparsity / 2, - "op_types": ["Linear"], - "op_names": [x for layer in attention_name_groups[6:] for x in layer] -
} - ] - - else: - raise RuntimeError("Wrong usage number") + ] pruner = TransformerHeadPruner(model, config_list, **kwargs) pruner.compress() From a62f9de2246a6bbaa76dddf1c503c721f9c3d256 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 10:10:06 +0000 Subject: [PATCH 62/63] update docs --- docs/en_US/Compression/Pruner.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/en_US/Compression/Pruner.rst b/docs/en_US/Compression/Pruner.rst index a429c7ba83..4fb0aa8674 100644 --- a/docs/en_US/Compression/Pruner.rst +++ b/docs/en_US/Compression/Pruner.rst @@ -771,7 +771,13 @@ Suppose we want to prune a BERT with Huggingface implementation, which has the f :target: ../../img/huggingface_bert_architecture.png :alt: -**Usage Example: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code)**. Note that here we specify ``op_names`` in the config list to assign different sparsity to different layers. Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. +**Usage Example: one-shot pruning, assigning sparsity 0.5 to the first six layers and sparsity 0.25 to the last six layers (PyTorch code)**. Note that + +* Here we specify ``op_names`` in the config list to assign different sparsity to different layers. +* Meanwhile, we pass ``attention_name_groups`` to the pruner so that the pruner may group together the weights belonging to the same attention layer. +* Since this example does one-shot pruning, ``num_iterations`` is set to 1 and ``epochs_per_iteration`` is ignored. For iterative pruning, set ``num_iterations`` to the number of pruning iterations and ``epochs_per_iteration`` to the number of finetuning epochs between two iterations. +* The arguments ``trainer`` and ``optimizer`` are only used for iterative pruning or when the ranking criterion is ``taylorfo``; here the pruner ignores them (a minimal sketch of these callables follows the patches below). +* The argument ``forward_runner`` is only used when the ranking criterion is ``l1_activation`` or ``l2_activation``; here the pruner ignores it. From 80383af61a5b4feef602fcc2b705cdb238ec8ba6 Mon Sep 17 00:00:00 2001 From: Di Wu Date: Tue, 27 Jul 2021 10:20:00 +0000 Subject: [PATCH 63/63] update ut --- test/ut/sdk/test_transformer_pruners.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ut/sdk/test_transformer_pruners.py b/test/ut/sdk/test_transformer_pruners.py index a875c49485..762a7bdf9b 100644 --- a/test/ut/sdk/test_transformer_pruners.py +++ b/test/ut/sdk/test_transformer_pruners.py @@ -112,7 +112,7 @@ def forward_runner(model): # test model and mask export pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device) dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device)) - pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', input_shape=None, device=None, + pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', dummy_input=dummy_input, opset_version=10) # validate sparsity
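For readers adapting these examples: the ``trainer`` and ``forward_runner`` arguments referenced in the documentation above are ordinary Python callables supplied by the user, with the signatures used in the example script (``trainer(model, optimizer, criterion, epoch)`` and ``forward_runner(model)``). A minimal sketch of their expected shape follows; the ``train_dataloader``, ``device``, and batch/loss handling here are illustrative assumptions, not part of the NNI API.

.. code-block:: python

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def trainer(model, optimizer, criterion, epoch):
        # One finetuning pass over the training data. The pruner calls this
        # between pruning iterations, and also uses the resulting gradients
        # for the "taylorfo" ranking criterion.
        model.train()
        for batch in train_dataloader:  # assumed: dataloader of dict batches
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = criterion(outputs.logits, batch["labels"])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    def forward_runner(model):
        # Forward passes only, with no parameter updates; used to collect
        # activations for the "l1_activation" / "l2_activation" criteria.
        model.eval()
        with torch.no_grad():
            for batch in train_dataloader:
                batch = {k: v.to(device) for k, v in batch.items()}
                model(**batch)

Whether the pruner actually invokes these callables depends on ``ranking_criterion`` and ``num_iterations``, as summarized in the bullet list in the documentation patch above.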
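The "validate sparsity" step in the unit test can likewise be reproduced by hand once ``export_model`` has written the mask file. A rough sketch, assuming the exported file holds a dict mapping layer names to ``{'weight': ...}`` mask tensors (inspect the file first, since the exact layout may differ across NNI versions):

.. code-block:: python

    import torch

    masks = torch.load('./mask_tmp.pth')
    for name, mask in masks.items():
        weight_mask = mask['weight']
        # fraction of zeroed weight entries in this module's mask
        sparsity = (weight_mask == 0).sum().item() / weight_mask.numel()
        print(f"{name}: weight sparsity {sparsity:.2f}")

If the ``onnx`` package is installed, the exported graph can be sanity-checked as well, e.g. ``onnx.checker.check_model(onnx.load('./onnx_tmp.pth'))``.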