From ebdee9478ca7ba65497b9b96f7457698c6ee5115 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sun, 22 Dec 2024 03:44:01 -0600 Subject: [PATCH 01/81] vulkan: build fixes for 32b (#10927) * vulkan: build fixes for 32b Should fix #10923 * vulkan: initialize some buffer/offset variables --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 44 ++++++++++++++-------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 1696b6e27..323ce7cf3 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -3205,8 +3205,8 @@ static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_cont GGML_ABORT("fatal error"); } // Check if src is pinned memory - vk_buffer buf; - size_t buf_offset; + vk_buffer buf = nullptr; + size_t buf_offset = 0; ggml_vk_host_get(ctx->device, tensor->data, buf, buf_offset); const uint64_t ne0 = tensor->ne[0]; @@ -3269,7 +3269,7 @@ static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_cont VkBufferCopy buf_copy{ 0, offset, copy_size }; ggml_vk_sync_buffers(subctx); - vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy); + vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging->buffer, (VkBuffer)dst->buffer, 1, &buf_copy); for (uint64_t i3 = 0; i3 < ne3; i3++) { for (uint64_t i2 = 0; i2 < ne2; i2++) { @@ -3302,7 +3302,7 @@ static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, siz } // Check if src is pinned memory vk_buffer buf = nullptr; - size_t buf_offset; + size_t buf_offset = 0; ggml_vk_host_get(dst->device, src, buf, buf_offset); if (buf != nullptr) { @@ -3344,7 +3344,7 @@ static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, siz copy_size}; ggml_vk_sync_buffers(subctx); - vkCmdCopyBuffer(subctx->s->buffer, staging_buffer->buffer, dst->buffer, 1, &buf_copy); + vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging_buffer->buffer, (VkBuffer)dst->buffer, 1, &buf_copy); if (width == spitch) { deferred_memcpy((uint8_t *)staging_buffer->ptr, src, width * height, &subctx->in_memcpys); @@ -3400,7 +3400,7 @@ static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size // Check if dst is pinned memory vk_buffer buf = nullptr; - size_t buf_offset; + size_t buf_offset = 0; ggml_vk_host_get(src->device, dst, buf, buf_offset); std::vector slices(1); @@ -3480,7 +3480,7 @@ static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t ds VkBufferCopy bc{ src_offset, dst_offset, size }; - vkCmdCopyBuffer(ctx->s->buffer, src->buffer, dst->buffer, 1, &bc); + vkCmdCopyBuffer(ctx->s->buffer, (VkBuffer)src->buffer, (VkBuffer)dst->buffer, 1, &bc); } static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) { @@ -3732,9 +3732,9 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context; ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; - vk_buffer d_Qx; + vk_buffer d_Qx = nullptr; size_t qx_buf_offset = 0; - vk_buffer d_Qy; + vk_buffer d_Qy = nullptr; size_t qy_buf_offset = 0; bool src0_uma = false; @@ -3934,9 +3934,9 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context; 
ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; - vk_buffer d_Qx; + vk_buffer d_Qx = nullptr; size_t qx_buf_offset = 0; - vk_buffer d_Qy; + vk_buffer d_Qy = nullptr; size_t qy_buf_offset = 0; bool src0_uma = false; @@ -4112,7 +4112,7 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context; ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; - vk_buffer d_Qy; + vk_buffer d_Qy = nullptr; size_t qy_buf_offset = 0; bool src1_uma = false; @@ -4300,11 +4300,11 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context; - vk_buffer d_Qx; + vk_buffer d_Qx = nullptr; size_t qx_buf_offset = 0; - vk_buffer d_Qy; + vk_buffer d_Qy = nullptr; size_t qy_buf_offset = 0; - vk_buffer d_ids; + vk_buffer d_ids = nullptr; size_t ids_buf_offset = 0; bool src0_uma = false; @@ -4505,11 +4505,11 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context; - vk_buffer d_Qx; + vk_buffer d_Qx = nullptr; size_t qx_buf_offset = 0; - vk_buffer d_Qy; + vk_buffer d_Qy = nullptr; size_t qy_buf_offset = 0; - vk_buffer d_ids; + vk_buffer d_ids = nullptr; size_t ids_buf_offset = 0; bool src0_uma = false; @@ -4768,8 +4768,8 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx ggml_vk_sync_buffers(subctx); - vk_buffer d_Q, d_K, d_V, d_D, d_M; - uint64_t q_buf_offset, k_buf_offset, v_buf_offset, d_buf_offset, m_buf_offset; + vk_buffer d_Q = nullptr, d_K = nullptr, d_V = nullptr, d_D = nullptr, d_M = nullptr; + size_t q_buf_offset = 0, k_buf_offset = 0, v_buf_offset = 0, d_buf_offset = 0, m_buf_offset = 0; bool Q_uma = false, K_uma = false, V_uma = false, D_uma = false, M_uma = false; @@ -5474,8 +5474,8 @@ static void ggml_vk_op_f32_rwkv6(ggml_backend_vk_context * ctx, vk_context& subc ggml_vk_sync_buffers(subctx); - vk_buffer d_D, d_K, d_V, d_R, d_TF, d_TD, d_State; - uint64_t k_offset, v_offset, r_offset, tf_offset, td_offset, state_offset, dst_offset; + vk_buffer d_D = nullptr, d_K = nullptr, d_V = nullptr, d_R = nullptr, d_TF = nullptr, d_TD = nullptr, d_State = nullptr; + size_t k_offset = 0, v_offset = 0, r_offset = 0, tf_offset = 0, td_offset = 0, state_offset = 0, dst_offset = 0; bool K_uma = false, V_uma = false, R_uma = false, TF_uma = false, TD_uma = false, STATE_uma = false, DST_uma = false; if (ctx->device->uma) { From 7ae33a616f44ecc081f3dcb589be20962d1d4a92 Mon Sep 17 00:00:00 2001 From: Billel Mokeddem Date: Mon, 23 Dec 2024 01:09:58 +0300 Subject: [PATCH 02/81] llama : add Falcon3 support (#10883) * Add Falcon3 model support * Add fix for adding bos to added special tokens * Add comment explaining the logic behind the if statement * Add a log message to better track the when the following line of code is triggered * Update log to only print when input and output characters are different * Fix handling pre-normalized tokens * Refactoring --- convert_hf_to_gguf.py | 13 +++++++++++++ 
convert_hf_to_gguf_update.py | 1 + src/llama.cpp | 16 +++++++++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 976425182..66aa7f5b1 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -529,9 +529,19 @@ class Model: else: token: str = reverse_vocab[i] if token in added_vocab: + # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized. + # To avoid unexpected issues - we make sure to normalize non-normalized tokens + if not tokenizer.added_tokens_decoder[i].normalized: + previous_token = token + token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False)) + if previous_token != token: + logger.info(f"{repr(previous_token)} is encoded and decoded back to {repr(token)} using AutoTokenizer") + if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token): toktypes.append(gguf.TokenType.CONTROL) else: + # NOTE: this was added for Gemma. + # Encoding and decoding the tokens above isn't sufficient for this case. token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces toktypes.append(gguf.TokenType.USER_DEFINED) else: @@ -575,6 +585,9 @@ class Model: if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed": # ref: https://huggingface.co/tiiuae/falcon-7b res = "falcon" + if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e": + # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base + res = "falcon3" if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": # ref: https://huggingface.co/BAAI/bge-small-en-v1.5 res = "bert-bge" diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 88058442f..2ba346640 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -72,6 +72,7 @@ models = [ {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", }, {"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", }, {"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", }, + {"name": "falcon3", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/Falcon3-7B-Base", }, {"name": "bert-bge-large", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/BAAI/bge-large-zh-v1.5", }, {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", }, {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", }, diff --git a/src/llama.cpp b/src/llama.cpp index b442781a0..115ef9080 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -1673,6 +1673,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, LLM_CHAT_TEMPLATE_MISTRAL_V7, LLM_CHAT_TEMPLATE_PHI_3, + LLM_CHAT_TEMPLATE_FALCON_3, LLM_CHAT_TEMPLATE_ZEPHYR, LLM_CHAT_TEMPLATE_MONARCH, LLM_CHAT_TEMPLATE_GEMMA, @@ -1705,6 +1706,7 @@ static const std::map LLM_CHAT_TEMPLATES = { { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, { "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, + { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 }, { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, { "monarch", LLM_CHAT_TEMPLATE_MONARCH }, { "gemma", LLM_CHAT_TEMPLATE_GEMMA }, @@ -6562,7 +6564,8 @@ static void llm_load_vocab( } else if ( tokenizer_pre == "llama3" || tokenizer_pre == "llama-v3" || - 
tokenizer_pre == "llama-bpe") { + tokenizer_pre == "llama-bpe"|| + tokenizer_pre == "falcon3") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3; vocab.tokenizer_ignore_merges = true; vocab.tokenizer_add_bos = true; @@ -22615,6 +22618,8 @@ static llm_chat_template llama_chat_detect_template(const std::string & tmpl) { } } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { return LLM_CHAT_TEMPLATE_PHI_3; + } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { + return LLM_CHAT_TEMPLATE_FALCON_3; } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) { return LLM_CHAT_TEMPLATE_ZEPHYR; } else if (tmpl_contains("bos_token + message['role']")) { @@ -22767,6 +22772,15 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|assistant|>\n"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) { + // Falcon 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>\n" << message->content << "\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) { // zephyr template for (auto message : chat) { From 7c0e28585843b366864b43b48f92425e2ea17df6 Mon Sep 17 00:00:00 2001 From: Rudi Servo Date: Sun, 22 Dec 2024 21:22:58 -0100 Subject: [PATCH 03/81] devops : add docker-multi-stage builds (#10832) --- .devops/cpu.Dockerfile | 81 ++++++++++++++++++ .devops/cuda.Dockerfile | 94 ++++++++++++++++++++ .devops/full-cuda.Dockerfile | 33 -------- .devops/full-musa.Dockerfile | 33 -------- .devops/full-rocm.Dockerfile | 50 ----------- .devops/full.Dockerfile | 38 --------- .devops/intel.Dockerfile | 91 ++++++++++++++++++++ .devops/llama-cli-cuda.Dockerfile | 38 --------- .devops/llama-cli-intel.Dockerfile | 28 ------ .devops/llama-cli-musa.Dockerfile | 38 --------- .devops/llama-cli-rocm.Dockerfile | 45 ---------- .devops/llama-cli-vulkan.Dockerfile | 27 ------ .devops/llama-cli.Dockerfile | 29 ------- .devops/llama-server-cuda.Dockerfile | 43 ---------- .devops/llama-server-intel.Dockerfile | 34 -------- .devops/llama-server-musa.Dockerfile | 43 ---------- .devops/llama-server-rocm.Dockerfile | 54 ------------ .devops/llama-server-vulkan.Dockerfile | 31 ------- .devops/llama-server.Dockerfile | 33 -------- .devops/musa.Dockerfile | 108 +++++++++++++++++++++++ .devops/rocm.Dockerfile | 113 +++++++++++++++++++++++++ .devops/vulkan.Dockerfile | 88 +++++++++++++++++++ .github/workflows/docker.yml | 104 +++++++++++++++++------ 23 files changed, 651 insertions(+), 625 deletions(-) create mode 100644 .devops/cpu.Dockerfile create mode 100644 .devops/cuda.Dockerfile delete mode 100644 .devops/full-cuda.Dockerfile delete mode 100644 .devops/full-musa.Dockerfile delete mode 100644 .devops/full-rocm.Dockerfile delete mode 100644 .devops/full.Dockerfile create mode 100644 .devops/intel.Dockerfile delete mode 100644 .devops/llama-cli-cuda.Dockerfile delete mode 100644 .devops/llama-cli-intel.Dockerfile delete mode 100644 .devops/llama-cli-musa.Dockerfile delete mode 100644 .devops/llama-cli-rocm.Dockerfile delete mode 100644 .devops/llama-cli-vulkan.Dockerfile delete mode 100644 .devops/llama-cli.Dockerfile delete mode 100644 .devops/llama-server-cuda.Dockerfile delete mode 100644 .devops/llama-server-intel.Dockerfile delete mode 100644 .devops/llama-server-musa.Dockerfile delete mode 100644 .devops/llama-server-rocm.Dockerfile delete mode 100644 .devops/llama-server-vulkan.Dockerfile delete mode 100644 .devops/llama-server.Dockerfile create mode 100644 
.devops/musa.Dockerfile create mode 100644 .devops/rocm.Dockerfile create mode 100644 .devops/vulkan.Dockerfile diff --git a/.devops/cpu.Dockerfile b/.devops/cpu.Dockerfile new file mode 100644 index 000000000..8d020f16c --- /dev/null +++ b/.devops/cpu.Dockerfile @@ -0,0 +1,81 @@ +ARG UBUNTU_VERSION=22.04 + +FROM ubuntu:$UBUNTU_VERSION AS build + +RUN apt-get update && \ + apt-get install -y build-essential git cmake libcurl4-openssl-dev + +WORKDIR /app + +COPY . . + +RUN cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ + cmake --build build -j $(nproc) + +RUN mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib \; + +RUN mkdir -p /app/full \ + && cp build/bin/* /app/full \ + && cp *.py /app/full \ + && cp -r gguf-py /app/full \ + && cp -r requirements /app/full \ + && cp requirements.txt /app/full \ + && cp .devops/tools.sh /app/full/tools.sh + +## Base image +FROM ubuntu:$UBUNTU_VERSION AS base + +RUN apt-get update \ + && apt-get install -y libgomp1 curl\ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +COPY --from=build /app/lib/ /app + +### Full +FROM base AS full + +COPY --from=build /app/full /app + +WORKDIR /app + +RUN apt-get update \ + && apt-get install -y \ + git \ + python3 \ + python3-pip \ + && pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt \ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +ENTRYPOINT ["/app/tools.sh"] + +### Light, CLI only +FROM base AS light + +COPY --from=build /app/full/llama-cli /app + +WORKDIR /app + +ENTRYPOINT [ "/app/llama-cli" ] + +### Server, Server only +FROM base AS server + +ENV LLAMA_ARG_HOST=0.0.0.0 + +COPY --from=build /app/full/llama-server /app + +WORKDIR /app + +HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] + +ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/cuda.Dockerfile b/.devops/cuda.Dockerfile new file mode 100644 index 000000000..974dd78a8 --- /dev/null +++ b/.devops/cuda.Dockerfile @@ -0,0 +1,94 @@ +ARG UBUNTU_VERSION=22.04 +# This needs to generally match the container host's environment. +ARG CUDA_VERSION=12.6.0 +# Target the CUDA build image +ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} + +ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} + +FROM ${BASE_CUDA_DEV_CONTAINER} AS build + +# CUDA architecture to build for (defaults to all supported archs) +ARG CUDA_DOCKER_ARCH=default + +RUN apt-get update && \ + apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1 + +WORKDIR /app + +COPY . . + +RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ + export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ + fi && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ + cmake --build build --config Release -j$(nproc) + +RUN mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib \; + +RUN mkdir -p /app/full \ + && cp build/bin/* /app/full \ + && cp *.py /app/full \ + && cp -r gguf-py /app/full \ + && cp -r requirements /app/full \ + && cp requirements.txt /app/full \ + && cp .devops/tools.sh /app/full/tools.sh + +## Base image +FROM ${BASE_CUDA_RUN_CONTAINER} AS base + +RUN apt-get update \ + && apt-get install -y libgomp1 curl\ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +COPY --from=build /app/lib/ /app + +### Full +FROM base AS full + +COPY --from=build /app/full /app + +WORKDIR /app + +RUN apt-get update \ + && apt-get install -y \ + git \ + python3 \ + python3-pip \ + && pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt \ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + + +ENTRYPOINT ["/app/tools.sh"] + +### Light, CLI only +FROM base AS light + +COPY --from=build /app/full/llama-cli /app + +WORKDIR /app + +ENTRYPOINT [ "/app/llama-cli" ] + +### Server, Server only +FROM base AS server + +ENV LLAMA_ARG_HOST=0.0.0.0 + +COPY --from=build /app/full/llama-server /app + +WORKDIR /app + +HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] + +ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/full-cuda.Dockerfile b/.devops/full-cuda.Dockerfile deleted file mode 100644 index 05bff1bdf..000000000 --- a/.devops/full-cuda.Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -ARG UBUNTU_VERSION=22.04 -# This needs to generally match the container host's environment. -ARG CUDA_VERSION=12.6.0 -# Target the CUDA build image -ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} - -FROM ${BASE_CUDA_DEV_CONTAINER} AS build - -# CUDA architecture to build for (defaults to all supported archs) -ARG CUDA_DOCKER_ARCH=default - -RUN apt-get update && \ - apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1 - -COPY requirements.txt requirements.txt -COPY requirements requirements - -RUN pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt - -WORKDIR /app - -COPY . . - -# Use the default CUDA archs if not specified -RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ - export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ - fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release -j$(nproc) && \ - cp build/bin/* . - -ENTRYPOINT ["/app/.devops/tools.sh"] diff --git a/.devops/full-musa.Dockerfile b/.devops/full-musa.Dockerfile deleted file mode 100644 index 3193fea1e..000000000 --- a/.devops/full-musa.Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -ARG UBUNTU_VERSION=22.04 -# This needs to generally match the container host's environment. 
-ARG MUSA_VERSION=rc3.1.0 -# Target the MUSA build image -ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION} - -FROM ${BASE_MUSA_DEV_CONTAINER} AS build - -# MUSA architecture to build for (defaults to all supported archs) -ARG MUSA_DOCKER_ARCH=default - -RUN apt-get update && \ - apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1 - -COPY requirements.txt requirements.txt -COPY requirements requirements - -RUN pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt - -WORKDIR /app - -COPY . . - -# Use the default MUSA archs if not specified -RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ - export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ - fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release -j$(nproc) && \ - cp build/bin/* . - -ENTRYPOINT ["/app/.devops/tools.sh"] diff --git a/.devops/full-rocm.Dockerfile b/.devops/full-rocm.Dockerfile deleted file mode 100644 index df496bcd2..000000000 --- a/.devops/full-rocm.Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -ARG UBUNTU_VERSION=22.04 - -# This needs to generally match the container host's environment. -ARG ROCM_VERSION=5.6 - -# Target the CUDA build image -ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete - -FROM ${BASE_ROCM_DEV_CONTAINER} AS build - -# Unless otherwise specified, we make a fat build. -# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 -# This is mostly tied to rocBLAS supported archs. -ARG ROCM_DOCKER_ARCH="\ - gfx803 \ - gfx900 \ - gfx906 \ - gfx908 \ - gfx90a \ - gfx1010 \ - gfx1030 \ - gfx1100 \ - gfx1101 \ - gfx1102" - -COPY requirements.txt requirements.txt -COPY requirements requirements - -RUN pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt - -WORKDIR /app - -COPY . . - -# Set nvcc architecture -ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH} -# Enable ROCm -ENV GGML_HIPBLAS=1 -ENV CC=/opt/rocm/llvm/bin/clang -ENV CXX=/opt/rocm/llvm/bin/clang++ - -# Enable cURL -ENV LLAMA_CURL=1 -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev - -RUN make -j$(nproc) - -ENTRYPOINT ["/app/.devops/tools.sh"] diff --git a/.devops/full.Dockerfile b/.devops/full.Dockerfile deleted file mode 100644 index d93c0be6a..000000000 --- a/.devops/full.Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -ARG UBUNTU_VERSION=22.04 - -FROM ubuntu:$UBUNTU_VERSION AS build - -RUN apt-get update && \ - apt-get install -y build-essential git cmake libcurl4-openssl-dev - -WORKDIR /app - -COPY . . - -RUN cmake -S . 
-B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ - cmake --build build -j $(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib/ \; - -FROM ubuntu:$UBUNTU_VERSION as runtime - -WORKDIR /app - -RUN apt-get update && \ - apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1 - -COPY requirements.txt /app/requirements.txt -COPY requirements /app/requirements -COPY .devops/tools.sh /app/tools.sh - -RUN pip install --upgrade pip setuptools wheel && \ - pip install -r /app/requirements.txt - -COPY --from=build /app/build/bin/ /app/ -COPY --from=build /app/lib/ /app/ -COPY --from=build /app/convert_hf_to_gguf.py /app/ -COPY --from=build /app/gguf-py /app/gguf-py - -ENV LC_ALL=C.utf8 - -ENTRYPOINT ["/app/tools.sh"] diff --git a/.devops/intel.Dockerfile b/.devops/intel.Dockerfile new file mode 100644 index 000000000..af783f5e9 --- /dev/null +++ b/.devops/intel.Dockerfile @@ -0,0 +1,91 @@ +ARG ONEAPI_VERSION=2025.0.0-0-devel-ubuntu22.04 + +## Build Image + +FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build + +ARG GGML_SYCL_F16=OFF +RUN apt-get update && \ + apt-get install -y git libcurl4-openssl-dev + +WORKDIR /app + +COPY . . + +RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ + echo "GGML_SYCL_F16 is set" \ + && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \ + fi && \ + echo "Building with dynamic libs" && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \ + cmake --build build --config Release -j$(nproc) + +RUN mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib \; + +RUN mkdir -p /app/full \ + && cp build/bin/* /app/full \ + && cp *.py /app/full \ + && cp -r gguf-py /app/full \ + && cp -r requirements /app/full \ + && cp requirements.txt /app/full \ + && cp .devops/tools.sh /app/full/tools.sh + +FROM intel/oneapi-basekit:$ONEAPI_VERSION AS base + +RUN apt-get update \ + && apt-get install -y libgomp1 curl\ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +### Full +FROM base AS full + +COPY --from=build /app/lib/ /app +COPY --from=build /app/full /app + +WORKDIR /app + +RUN apt-get update \ + && apt-get install -y \ + git \ + python3 \ + python3-pip \ + && pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt \ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + + +ENTRYPOINT ["/app/tools.sh"] + +### Light, CLI only +FROM base AS light + +COPY --from=build /app/lib/ /app +COPY --from=build /app/full/llama-cli /app + +WORKDIR /app + +ENTRYPOINT [ "/app/llama-cli" ] + +### Server, Server only +FROM base AS server + +ENV LLAMA_ARG_HOST=0.0.0.0 + +COPY --from=build /app/lib/ /app +COPY --from=build /app/full/llama-server /app + +WORKDIR /app + +HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] + +ENTRYPOINT [ "/app/llama-server" ] + diff --git a/.devops/llama-cli-cuda.Dockerfile b/.devops/llama-cli-cuda.Dockerfile deleted file mode 100644 index 7796891d5..000000000 --- a/.devops/llama-cli-cuda.Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -ARG UBUNTU_VERSION=22.04 -# This needs to generally match the container host's 
environment. -ARG CUDA_VERSION=12.6.0 -# Target the CUDA build image -ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} -# Target the CUDA runtime image -ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} - -FROM ${BASE_CUDA_DEV_CONTAINER} AS build - -# CUDA architecture to build for (defaults to all supported archs) -ARG CUDA_DOCKER_ARCH=default - -RUN apt-get update && \ - apt-get install -y build-essential git cmake - -WORKDIR /app - -COPY . . - -# Use the default CUDA archs if not specified -RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ - export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ - fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release --target llama-cli -j$(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib \; - -FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime - -RUN apt-get update && \ - apt-get install -y libgomp1 - -COPY --from=build /app/lib/ / -COPY --from=build /app/build/bin/llama-cli / - -ENTRYPOINT [ "/llama-cli" ] diff --git a/.devops/llama-cli-intel.Dockerfile b/.devops/llama-cli-intel.Dockerfile deleted file mode 100644 index 0706f732a..000000000 --- a/.devops/llama-cli-intel.Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -ARG ONEAPI_VERSION=2025.0.0-0-devel-ubuntu22.04 - -FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build - -ARG GGML_SYCL_F16=OFF -RUN apt-get update && \ - apt-get install -y git - -WORKDIR /app - -COPY . . - -RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ - echo "GGML_SYCL_F16 is set" && \ - export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \ - fi && \ - echo "Building with static libs" && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \ - ${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \ - cmake --build build --config Release --target llama-cli - -FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime - -COPY --from=build /app/build/bin/llama-cli /llama-cli - -ENV LC_ALL=C.utf8 - -ENTRYPOINT [ "/llama-cli" ] diff --git a/.devops/llama-cli-musa.Dockerfile b/.devops/llama-cli-musa.Dockerfile deleted file mode 100644 index e7c75af20..000000000 --- a/.devops/llama-cli-musa.Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -ARG UBUNTU_VERSION=22.04 -# This needs to generally match the container host's environment. -ARG MUSA_VERSION=rc3.1.0 -# Target the MUSA build image -ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION} -# Target the MUSA runtime image -ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} - -FROM ${BASE_MUSA_DEV_CONTAINER} AS build - -# MUSA architecture to build for (defaults to all supported archs) -ARG MUSA_DOCKER_ARCH=default - -RUN apt-get update && \ - apt-get install -y build-essential git cmake - -WORKDIR /app - -COPY . . - -# Use the default MUSA archs if not specified -RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ - export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ - fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ - cmake --build build --config Release --target llama-cli -j$(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib \; - -FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime - -RUN apt-get update && \ - apt-get install -y libgomp1 - -COPY --from=build /app/lib/ / -COPY --from=build /app/build/bin/llama-cli /llama-cli - -ENTRYPOINT [ "/llama-cli" ] diff --git a/.devops/llama-cli-rocm.Dockerfile b/.devops/llama-cli-rocm.Dockerfile deleted file mode 100644 index e60c747bd..000000000 --- a/.devops/llama-cli-rocm.Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -ARG UBUNTU_VERSION=22.04 - -# This needs to generally match the container host's environment. -ARG ROCM_VERSION=5.6 - -# Target the CUDA build image -ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete - -FROM ${BASE_ROCM_DEV_CONTAINER} AS build - -# Unless otherwise specified, we make a fat build. -# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 -# This is mostly tied to rocBLAS supported archs. -ARG ROCM_DOCKER_ARCH="\ - gfx803 \ - gfx900 \ - gfx906 \ - gfx908 \ - gfx90a \ - gfx1010 \ - gfx1030 \ - gfx1100 \ - gfx1101 \ - gfx1102" - -COPY requirements.txt requirements.txt -COPY requirements requirements - -RUN pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt - -WORKDIR /app - -COPY . . - -# Set nvcc architecture -ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH} -# Enable ROCm -ENV GGML_HIPBLAS=1 -ENV CC=/opt/rocm/llvm/bin/clang -ENV CXX=/opt/rocm/llvm/bin/clang++ - -RUN make -j$(nproc) llama-cli - -ENTRYPOINT [ "/app/llama-cli" ] diff --git a/.devops/llama-cli-vulkan.Dockerfile b/.devops/llama-cli-vulkan.Dockerfile deleted file mode 100644 index 92a6e0479..000000000 --- a/.devops/llama-cli-vulkan.Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -ARG UBUNTU_VERSION=jammy - -FROM ubuntu:$UBUNTU_VERSION AS build - -# Install build tools -RUN apt update && apt install -y git build-essential cmake wget libgomp1 - -# Install Vulkan SDK -RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ - wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ - apt update -y && \ - apt-get install -y vulkan-sdk - -# Build it -WORKDIR /app -COPY . . -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 && \ - cmake --build build --config Release --target llama-cli - -# Clean up -WORKDIR / -RUN cp /app/build/bin/llama-cli /llama-cli && \ - rm -rf /app - -ENV LC_ALL=C.utf8 - -ENTRYPOINT [ "/llama-cli" ] diff --git a/.devops/llama-cli.Dockerfile b/.devops/llama-cli.Dockerfile deleted file mode 100644 index be234d55d..000000000 --- a/.devops/llama-cli.Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -ARG UBUNTU_VERSION=22.04 - -FROM ubuntu:$UBUNTU_VERSION AS build - -RUN apt-get update && \ - apt-get install -y build-essential git cmake libcurl4-openssl-dev - -WORKDIR /app - -COPY . . - -RUN cmake -S . 
-B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ - cmake --build build -j $(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib/ \; - -FROM ubuntu:$UBUNTU_VERSION AS runtime - -WORKDIR /app - -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev libgomp1 curl - -COPY --from=build /app/build/bin/llama-cli /app/ -COPY --from=build /app/lib/ /app/ - -ENV LC_ALL=C.utf8 - -ENTRYPOINT [ "/app/llama-cli" ] diff --git a/.devops/llama-server-cuda.Dockerfile b/.devops/llama-server-cuda.Dockerfile deleted file mode 100644 index bf8a198f9..000000000 --- a/.devops/llama-server-cuda.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -ARG UBUNTU_VERSION=22.04 -# This needs to generally match the container host's environment. -ARG CUDA_VERSION=12.6.0 -# Target the CUDA build image -ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} -# Target the CUDA runtime image -ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} - -FROM ${BASE_CUDA_DEV_CONTAINER} AS build - -# CUDA architecture to build for (defaults to all supported archs) -ARG CUDA_DOCKER_ARCH=default - -RUN apt-get update && \ - apt-get install -y build-essential git cmake libcurl4-openssl-dev - -WORKDIR /app - -COPY . . - -# Use the default CUDA archs if not specified -RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ - export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ - fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release --target llama-server -j$(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib \; - -FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime - -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev libgomp1 curl - -COPY --from=build /app/lib/ / -COPY --from=build /app/build/bin/llama-server /llama-server - -# Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 - -HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] - -ENTRYPOINT [ "/llama-server" ] diff --git a/.devops/llama-server-intel.Dockerfile b/.devops/llama-server-intel.Dockerfile deleted file mode 100644 index b503b8cfe..000000000 --- a/.devops/llama-server-intel.Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -ARG ONEAPI_VERSION=2025.0.0-0-devel-ubuntu22.04 - -FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build - -ARG GGML_SYCL_F16=OFF -RUN apt-get update && \ - apt-get install -y git libcurl4-openssl-dev - -WORKDIR /app - -COPY . . 
- -RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ - echo "GGML_SYCL_F16 is set" && \ - export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \ - fi && \ - echo "Building with dynamic libs" && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \ - cmake --build build --config Release --target llama-server - -FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime - -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev curl - -COPY --from=build /app/build/bin/llama-server /llama-server - -ENV LC_ALL=C.utf8 -# Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 - -HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] - -ENTRYPOINT [ "/llama-server" ] diff --git a/.devops/llama-server-musa.Dockerfile b/.devops/llama-server-musa.Dockerfile deleted file mode 100644 index cebe51d42..000000000 --- a/.devops/llama-server-musa.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -ARG UBUNTU_VERSION=22.04 -# This needs to generally match the container host's environment. -ARG MUSA_VERSION=rc3.1.0 -# Target the MUSA build image -ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION} -# Target the MUSA runtime image -ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} - -FROM ${BASE_MUSA_DEV_CONTAINER} AS build - -# MUSA architecture to build for (defaults to all supported archs) -ARG MUSA_DOCKER_ARCH=default - -RUN apt-get update && \ - apt-get install -y build-essential git cmake libcurl4-openssl-dev - -WORKDIR /app - -COPY . . - -# Use the default MUSA archs if not specified -RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ - export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ - fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release --target llama-server -j$(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib \; - -FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime - -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev libgomp1 curl - -COPY --from=build /app/lib/ / -COPY --from=build /app/build/bin/llama-server /llama-server - -# Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 - -HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] - -ENTRYPOINT [ "/llama-server" ] diff --git a/.devops/llama-server-rocm.Dockerfile b/.devops/llama-server-rocm.Dockerfile deleted file mode 100644 index 8553af75b..000000000 --- a/.devops/llama-server-rocm.Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -ARG UBUNTU_VERSION=22.04 - -# This needs to generally match the container host's environment. -ARG ROCM_VERSION=5.6 - -# Target the CUDA build image -ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete - -FROM ${BASE_ROCM_DEV_CONTAINER} AS build - -# Unless otherwise specified, we make a fat build. -# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 -# This is mostly tied to rocBLAS supported archs. -ARG ROCM_DOCKER_ARCH="\ - gfx803 \ - gfx900 \ - gfx906 \ - gfx908 \ - gfx90a \ - gfx1010 \ - gfx1030 \ - gfx1100 \ - gfx1101 \ - gfx1102" - -COPY requirements.txt requirements.txt -COPY requirements requirements - -RUN pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt - -WORKDIR /app - -COPY . 
. - -# Set nvcc architecture -ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH} -# Enable ROCm -ENV GGML_HIPBLAS=1 -ENV CC=/opt/rocm/llvm/bin/clang -ENV CXX=/opt/rocm/llvm/bin/clang++ -# Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 - -# Enable cURL -ENV LLAMA_CURL=1 -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev curl - -RUN make -j$(nproc) llama-server - -HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] - -ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/llama-server-vulkan.Dockerfile b/.devops/llama-server-vulkan.Dockerfile deleted file mode 100644 index 6aa786779..000000000 --- a/.devops/llama-server-vulkan.Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -ARG UBUNTU_VERSION=jammy - -FROM ubuntu:$UBUNTU_VERSION AS build - -# Install build tools -RUN apt update && apt install -y git build-essential cmake wget - -# Install Vulkan SDK and cURL -RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ - wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ - apt update -y && \ - apt-get install -y vulkan-sdk libcurl4-openssl-dev curl - -# Build it -WORKDIR /app -COPY . . -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \ - cmake --build build --config Release --target llama-server - -# Clean up -WORKDIR / -RUN cp /app/build/bin/llama-server /llama-server && \ - rm -rf /app - -ENV LC_ALL=C.utf8 -# Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 - -HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] - -ENTRYPOINT [ "/llama-server" ] diff --git a/.devops/llama-server.Dockerfile b/.devops/llama-server.Dockerfile deleted file mode 100644 index 72ccde2fe..000000000 --- a/.devops/llama-server.Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -ARG UBUNTU_VERSION=22.04 - -FROM ubuntu:$UBUNTU_VERSION AS build - -RUN apt-get update && \ - apt-get install -y build-essential git cmake libcurl4-openssl-dev - -WORKDIR /app - -COPY . . - -RUN cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ - cmake --build build -j $(nproc) && \ - mkdir -p /app/lib && \ - find build -name "*.so" -exec cp {} /app/lib/ \; - -FROM ubuntu:$UBUNTU_VERSION AS runtime - -WORKDIR /app - -RUN apt-get update && \ - apt-get install -y libcurl4-openssl-dev libgomp1 curl - -COPY --from=build /app/build/bin/llama-server /app/ -COPY --from=build /app/lib/ /app/ - -ENV LC_ALL=C.utf8 -# Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 - -HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] - -ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/musa.Dockerfile b/.devops/musa.Dockerfile new file mode 100644 index 000000000..bfd7fc1c1 --- /dev/null +++ b/.devops/musa.Dockerfile @@ -0,0 +1,108 @@ +ARG UBUNTU_VERSION=22.04 +# This needs to generally match the container host's environment. 
+ARG MUSA_VERSION=rc3.1.0 +# Target the MUSA build image +ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION} + +ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} + +FROM ${BASE_MUSA_DEV_CONTAINER} AS build + +# MUSA architecture to build for (defaults to all supported archs) +ARG MUSA_DOCKER_ARCH=default + +RUN apt-get update && \ + apt-get install -y \ + build-essential \ + cmake \ + python3 \ + python3-pip \ + git \ + libcurl4-openssl-dev \ + libgomp1 + +COPY requirements.txt requirements.txt +COPY requirements requirements + +RUN pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt + +WORKDIR /app + +COPY . . + +# Use the default MUSA archs if not specified +RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ + export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ + fi && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ + cmake --build build --config Release -j$(nproc) + +RUN mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib \; + +RUN mkdir -p /app/full \ + && cp build/bin/* /app/full \ + && cp *.py /app/full \ + && cp -r gguf-py /app/full \ + && cp -r requirements /app/full \ + && cp requirements.txt /app/full \ + && cp .devops/tools.sh /app/full/tools.sh + +## Base image +FROM ${BASE_MUSA_RUN_CONTAINER} AS base + +RUN apt-get update \ + && apt-get install -y libgomp1 curl\ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +COPY --from=build /app/lib/ /app + +### Full +FROM base AS full + +COPY --from=build /app/full /app + +WORKDIR /app + +RUN apt-get update \ + && apt-get install -y \ + git \ + python3 \ + python3-pip \ + && pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt \ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + + +ENTRYPOINT ["/app/tools.sh"] + +### Light, CLI only +FROM base AS light + +COPY --from=build /app/full/llama-cli /app + +WORKDIR /app + +ENTRYPOINT [ "/app/llama-cli" ] + +### Server, Server only +FROM base AS server + +ENV LLAMA_ARG_HOST=0.0.0.0 + +COPY --from=build /app/full/llama-server /app + +WORKDIR /app + +HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] + +ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/rocm.Dockerfile b/.devops/rocm.Dockerfile new file mode 100644 index 000000000..a8088ea00 --- /dev/null +++ b/.devops/rocm.Dockerfile @@ -0,0 +1,113 @@ +ARG UBUNTU_VERSION=24.04 + +# This needs to generally match the container host's environment. +ARG ROCM_VERSION=6.3 +ARG AMDGPU_VERSION=6.3 + +# Target the CUDA build image +ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete + +### Build image +FROM ${BASE_ROCM_DEV_CONTAINER} AS build + +# Unless otherwise specified, we make a fat build. +# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 +# This is mostly tied to rocBLAS supported archs. 
+# gfx803, gfx900, gfx1032, gfx1101, gfx1102,not officialy supported +# gfx906 is deprecated +#check https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.2.4/reference/system-requirements.html + +#ARG ROCM_DOCKER_ARCH='gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102' +ARG ROCM_DOCKER_ARCH=gfx1100 + +# Set nvcc architectured +ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH} +# Enable ROCm +# ENV CC=/opt/rocm/llvm/bin/clang +# ENV CXX=/opt/rocm/llvm/bin/clang++ + +RUN apt-get update \ + && apt-get install -y \ + build-essential \ + cmake \ + git \ + libcurl4-openssl-dev \ + curl \ + libgomp1 + +WORKDIR /app + +COPY . . + +RUN HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \ + cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON \ + && cmake --build build --config Release -j$(nproc) + +RUN mkdir -p /app/lib \ + && find build -name "*.so" -exec cp {} /app/lib \; + +RUN mkdir -p /app/full \ + && cp build/bin/* /app/full \ + && cp *.py /app/full \ + && cp -r gguf-py /app/full \ + && cp -r requirements /app/full \ + && cp requirements.txt /app/full \ + && cp .devops/tools.sh /app/full/tools.sh + +## Base image +FROM ${BASE_ROCM_DEV_CONTAINER} AS base + +RUN apt-get update \ + && apt-get install -y libgomp1 curl\ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +COPY --from=build /app/lib/ /app + +### Full +FROM base AS full + +COPY --from=build /app/full /app + +WORKDIR /app + +RUN apt-get update \ + && apt-get install -y \ + git \ + python3-pip \ + python3 \ + python3-wheel\ + && pip install --break-system-packages --upgrade setuptools \ + && pip install --break-system-packages -r requirements.txt \ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +ENTRYPOINT ["/app/tools.sh"] + +### Light, CLI only +FROM base AS light + +COPY --from=build /app/full/llama-cli /app + +WORKDIR /app + +ENTRYPOINT [ "/app/llama-cli" ] + +### Server, Server only +FROM base AS server + +ENV LLAMA_ARG_HOST=0.0.0.0 + +COPY --from=build /app/full/llama-server /app + +WORKDIR /app + +HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] + +ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile new file mode 100644 index 000000000..cfc2162e3 --- /dev/null +++ b/.devops/vulkan.Dockerfile @@ -0,0 +1,88 @@ +ARG UBUNTU_VERSION=jammy + +FROM ubuntu:$UBUNTU_VERSION AS build + +# Install build tools +RUN apt update && apt install -y git build-essential cmake wget + +# Install Vulkan SDK and cURL +RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ + apt update -y && \ + apt-get install -y vulkan-sdk libcurl4-openssl-dev curl + +# Build it +WORKDIR /app + +COPY . . 
+ +RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \ + cmake --build build --config Release -j$(nproc) + +RUN mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib \; + +RUN mkdir -p /app/full \ + && cp build/bin/* /app/full \ + && cp *.py /app/full \ + && cp -r gguf-py /app/full \ + && cp -r requirements /app/full \ + && cp requirements.txt /app/full \ + && cp .devops/tools.sh /app/full/tools.sh + +## Base image +FROM ubuntu:$UBUNTU_VERSION AS base + +RUN apt-get update \ + && apt-get install -y libgomp1 curl\ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +COPY --from=build /app/lib/ /app + +### Full +FROM base AS full + +COPY --from=build /app/full /app + +WORKDIR /app + +RUN apt-get update \ + && apt-get install -y \ + git \ + python3 \ + python3-pip \ + && pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt \ + && apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* \ + && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ + && find /var/cache -type f -delete + +ENTRYPOINT ["/app/tools.sh"] + +### Light, CLI only +FROM base AS light + +COPY --from=build /app/full/llama-cli /app + +WORKDIR /app + +ENTRYPOINT [ "/app/llama-cli" ] + +### Server, Server only +FROM base AS server + +ENV LLAMA_ARG_HOST=0.0.0.0 + +COPY --from=build /app/full/llama-server /app + +WORKDIR /app + +HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] + +ENTRYPOINT [ "/app/llama-server" ] diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index bc2e5020d..41f1a89ee 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -34,21 +34,14 @@ jobs: strategy: matrix: config: - - { tag: "light", dockerfile: ".devops/llama-cli.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "server", dockerfile: ".devops/llama-server.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "full", dockerfile: ".devops/full.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "light-cuda", dockerfile: ".devops/llama-cli-cuda.Dockerfile", platforms: "linux/amd64" } - - { tag: "server-cuda", dockerfile: ".devops/llama-server-cuda.Dockerfile", platforms: "linux/amd64" } - - { tag: "full-cuda", dockerfile: ".devops/full-cuda.Dockerfile", platforms: "linux/amd64" } - - { tag: "light-musa", dockerfile: ".devops/llama-cli-musa.Dockerfile", platforms: "linux/amd64" } - - { tag: "server-musa", dockerfile: ".devops/llama-server-musa.Dockerfile", platforms: "linux/amd64" } - - { tag: "full-musa", dockerfile: ".devops/full-musa.Dockerfile", platforms: "linux/amd64" } + # Multi-stage build + - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: false} + - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} + - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} + - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} + - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, 
freediskspace: false} # Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete - #- { tag: "light-rocm", dockerfile: ".devops/llama-cli-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } - #- { tag: "server-rocm", dockerfile: ".devops/llama-server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } - #- { tag: "full-rocm", dockerfile: ".devops/full-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "light-intel", dockerfile: ".devops/llama-cli-intel.Dockerfile", platforms: "linux/amd64" } - - { tag: "server-intel", dockerfile: ".devops/llama-server-intel.Dockerfile", platforms: "linux/amd64" } + #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: true } steps: - name: Check out the repo uses: actions/checkout@v4 @@ -56,10 +49,10 @@ jobs: fetch-depth: 0 # preserve git history, so we can determine the build number - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Log in to Docker Hub uses: docker/login-action@v2 @@ -79,25 +72,34 @@ jobs: # determine tag name postfix (build number, commit hash) if [[ "${{ env.GITHUB_BRANCH_NAME }}" == "master" ]]; then - TAG_POSTFIX="b${BUILD_NUMBER}" + TAG_POSTFIX="-b${BUILD_NUMBER}" else SAFE_NAME=$(echo "${{ env.GITHUB_BRANCH_NAME }}" | tr '/' '-') - TAG_POSTFIX="${SAFE_NAME}-${SHORT_HASH}" + TAG_POSTFIX="-${SAFE_NAME}-${SHORT_HASH}" fi - # list all tags possible - TAGS="" - TAGS="${TAGS}ghcr.io/${REPO_OWNER}/${REPO_NAME}:${{ matrix.config.tag }}," - TAGS="${TAGS}ghcr.io/${REPO_OWNER}/${REPO_NAME}:${{ matrix.config.tag }}-${TAG_POSTFIX}" - - echo "output_tags=$TAGS" >> $GITHUB_OUTPUT - echo "output_tags=$TAGS" # print out for debugging + if [[ "${{ matrix.config.tag }}" == "cpu" ]]; then + TYPE="" + else + TYPE="-${{ matrix.config.tag }}" + fi + PREFIX="ghcr.io/${REPO_OWNER}/${REPO_NAME}:" + FULLTAGS="${PREFIX}full${TYPE},${PREFIX}full${TYPE}${TAG_POSTFIX}" + LIGHTTAGS="${PREFIX}light${TYPE},${PREFIX}light${TYPE}${TAG_POSTFIX}" + SERVERTAGS="${PREFIX}server${TYPE},${PREFIX}server${TYPE}${TAG_POSTFIX}" + echo "full_output_tags=$FULLTAGS" >> $GITHUB_OUTPUT + echo "light_output_tags=$LIGHTTAGS" >> $GITHUB_OUTPUT + echo "server_output_tags=$SERVERTAGS" >> $GITHUB_OUTPUT + echo "full_output_tags=$FULLTAGS" # print out for debugging + echo "light_output_tags=$LIGHTTAGS" # print out for debugging + echo "server_output_tags=$SERVERTAGS" # print out for debugging env: GITHUB_BRANCH_NAME: ${{ github.head_ref || github.ref_name }} GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}' # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example - name: Free Disk Space (Ubuntu) + if: ${{ matrix.config.free_disk_space == true }} uses: jlumbroso/free-disk-space@main with: # this might remove tools that are actually needed, @@ -113,13 +115,59 @@ jobs: docker-images: true swap-storage: true - - name: Build and push Docker image (tagged + versioned) - if: ${{ github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + - name: Build and push Full Docker image (tagged + versioned) + if: ${{ (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && matrix.config.full == true }} uses: 
docker/build-push-action@v6 with: context: . push: true platforms: ${{ matrix.config.platforms }} # tag list is generated from step above - tags: ${{ steps.tag.outputs.output_tags }} + tags: ${{ steps.tag.outputs.full_output_tags }} file: ${{ matrix.config.dockerfile }} + target: full + provenance: false + # using github experimental cache + cache-from: type=gha + cache-to: type=gha,mode=max + # return to this if the experimental github cache is having issues + #cache-to: type=local,dest=/tmp/.buildx-cache + #cache-from: type=local,src=/tmp/.buildx-cache + + - name: Build and push Light Docker image (tagged + versioned) + if: ${{ (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && matrix.config.light == true }} + uses: docker/build-push-action@v6 + with: + context: . + push: true + platforms: ${{ matrix.config.platforms }} + # tag list is generated from step above + tags: ${{ steps.tag.outputs.light_output_tags }} + file: ${{ matrix.config.dockerfile }} + target: light + provenance: false + # using github experimental cache + cache-from: type=gha + cache-to: type=gha,mode=max + # return to this if the experimental github cache is having issues + #cache-to: type=local,dest=/tmp/.buildx-cache + #cache-from: type=local,src=/tmp/.buildx-cache + + - name: Build and push Server Docker image (tagged + versioned) + if: ${{ (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && matrix.config.server == true }} + uses: docker/build-push-action@v6 + with: + context: . + push: true + platforms: ${{ matrix.config.platforms }} + # tag list is generated from step above + tags: ${{ steps.tag.outputs.server_output_tags }} + file: ${{ matrix.config.dockerfile }} + target: server + provenance: false + # using github experimental cache + cache-from: type=gha + cache-to: type=gha,mode=max + # return to this if the experimental github cache is having issues + #cache-to: type=local,dest=/tmp/.buildx-cache + #cache-from: type=local,src=/tmp/.buildx-cache From 7024d59e6a572730626cb11896829d115043a1b1 Mon Sep 17 00:00:00 2001 From: "yuri@FreeBSD" Date: Sun, 22 Dec 2024 16:20:11 -0800 Subject: [PATCH 04/81] ggml : fix run-time on FreeBSD in get_executable_path() (#10948) --- ggml/src/ggml-backend-reg.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 66927148a..31ee31e39 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -402,12 +402,16 @@ static std::string get_executable_path() { base_path = base_path.substr(0, last_slash); } return base_path + "/"; -#elif defined(__linux__) +#elif defined(__linux__) || defined(__FreeBSD__) std::string base_path = "."; std::vector path(1024); while (true) { // get executable path +# if defined(__linux__) ssize_t len = readlink("/proc/self/exe", path.data(), path.size()); +# elif defined(__FreeBSD__) + ssize_t len = readlink("/proc/curproc/file", path.data(), path.size()); +# endif if (len == -1) { break; } From dab76c92cc63072d9495ba87f2f3f3a4872d4f57 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Mon, 23 Dec 2024 00:21:40 +0000 Subject: [PATCH 05/81] llama-run : include temperature option (#10899) This commit updates the `examples/run/README.md` file to include a new option for setting the temperature and updates the `run.cpp` file to parse this option. 
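Example usage with the new option (the model path and prompt below are placeholders, not part of this patch; the invocation assumes the `llama-run [options] model [prompt]` form used by `examples/run`):

```sh
# Placeholder model path and prompt; --temp is the option added by this patch,
# -c/--context-size and -n/--ngl are pre-existing options.
llama-run --temp 0.2 -c 4096 -n 99 ./models/model.gguf "Say hello"
# If --temp is omitted, sampling falls back to the default of 0.8.
```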
Signed-off-by: Eric Curtin --- examples/run/README.md | 2 + examples/run/run.cpp | 111 +++++++++++++++++++++++++++-------------- 2 files changed, 75 insertions(+), 38 deletions(-) diff --git a/examples/run/README.md b/examples/run/README.md index 874293516..a06805441 100644 --- a/examples/run/README.md +++ b/examples/run/README.md @@ -19,6 +19,8 @@ Options: Context size (default: 2048) -n, --ngl Number of GPU layers (default: 0) + --temp + Temperature (default: 0.8) -v, --verbose, --log-verbose Set verbosity level to infinity (i.e. log all messages, useful for debugging) -h, --help diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 03da54ca3..f89d041c4 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -55,29 +55,51 @@ static int printe(const char * fmt, ...) { class Opt { public: int init(int argc, const char ** argv) { + ctx_params = llama_context_default_params(); + model_params = llama_model_default_params(); + context_size_default = ctx_params.n_batch; + ngl_default = model_params.n_gpu_layers; + common_params_sampling sampling; + temperature_default = sampling.temp; + + if (argc < 2) { + printe("Error: No arguments provided.\n"); + print_help(); + return 1; + } + // Parse arguments if (parse(argc, argv)) { printe("Error: Failed to parse arguments.\n"); - help(); + print_help(); return 1; } // If help is requested, show help and exit - if (help_) { - help(); + if (help) { + print_help(); return 2; } + ctx_params.n_batch = context_size >= 0 ? context_size : context_size_default; + model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default; + temperature = temperature >= 0 ? temperature : temperature_default; + return 0; // Success } + llama_context_params ctx_params; + llama_model_params model_params; std::string model_; - std::string user_; - int context_size_ = -1, ngl_ = -1; - bool verbose_ = false; + std::string user; + int context_size = -1, ngl = -1; + float temperature = -1; + bool verbose = false; private: - bool help_ = false; + int context_size_default = -1, ngl_default = -1; + float temperature_default = -1; + bool help = false; bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) { return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0; @@ -89,6 +111,17 @@ class Opt { } option_value = std::atoi(argv[++i]); + + return 0; + } + + int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) { + if (i + 1 >= argc) { + return 1; + } + + option_value = std::atof(argv[++i]); + return 0; } @@ -96,18 +129,22 @@ class Opt { bool options_parsing = true; for (int i = 1, positional_args_i = 0; i < argc; ++i) { if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) { - if (handle_option_with_value(argc, argv, i, context_size_) == 1) { + if (handle_option_with_value(argc, argv, i, context_size) == 1) { return 1; } } else if (options_parsing && (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0)) { - if (handle_option_with_value(argc, argv, i, ngl_) == 1) { + if (handle_option_with_value(argc, argv, i, ngl) == 1) { + return 1; + } + } else if (options_parsing && strcmp(argv[i], "--temp") == 0) { + if (handle_option_with_value(argc, argv, i, temperature) == 1) { return 1; } } else if (options_parsing && (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) { - verbose_ = true; + verbose = true; } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) { - help_ = true; + help = true; return 0; 
} else if (options_parsing && strcmp(argv[i], "--") == 0) { options_parsing = false; @@ -120,16 +157,16 @@ class Opt { model_ = argv[i]; } else if (positional_args_i == 1) { ++positional_args_i; - user_ = argv[i]; + user = argv[i]; } else { - user_ += " " + std::string(argv[i]); + user += " " + std::string(argv[i]); } } return 0; } - void help() const { + void print_help() const { printf( "Description:\n" " Runs a llm\n" @@ -142,6 +179,8 @@ class Opt { " Context size (default: %d)\n" " -n, --ngl \n" " Number of GPU layers (default: %d)\n" + " --temp \n" + " Temperature (default: %.1f)\n" " -v, --verbose, --log-verbose\n" " Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n" " -h, --help\n" @@ -170,7 +209,7 @@ class Opt { " llama-run file://some-file3.gguf\n" " llama-run --ngl 999 some-file4.gguf\n" " llama-run --ngl 999 some-file5.gguf Hello World\n", - llama_context_default_params().n_batch, llama_model_default_params().n_gpu_layers); + context_size_default, ngl_default, temperature_default); } }; @@ -495,12 +534,12 @@ class LlamaData { return 1; } - context = initialize_context(model, opt.context_size_); + context = initialize_context(model, opt); if (!context) { return 1; } - sampler = initialize_sampler(); + sampler = initialize_sampler(opt); return 0; } @@ -619,14 +658,12 @@ class LlamaData { // Initializes the model and returns a unique pointer to it llama_model_ptr initialize_model(Opt & opt) { ggml_backend_load_all(); - llama_model_params model_params = llama_model_default_params(); - model_params.n_gpu_layers = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers; resolve_model(opt.model_); printe( "\r%*s" "\rLoading model", get_terminal_width(), " "); - llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params)); + llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), opt.model_params)); if (!model) { printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str()); } @@ -636,10 +673,8 @@ class LlamaData { } // Initializes the context with the specified parameters - llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) { - llama_context_params ctx_params = llama_context_default_params(); - ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? 
n_ctx : ctx_params.n_batch; - llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params)); + llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) { + llama_context_ptr context(llama_new_context_with_model(model.get(), opt.ctx_params)); if (!context) { printe("%s: error: failed to create the llama_context\n", __func__); } @@ -648,10 +683,10 @@ class LlamaData { } // Initializes and configures the sampler - llama_sampler_ptr initialize_sampler() { + llama_sampler_ptr initialize_sampler(const Opt & opt) { llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params())); llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1)); - llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(0.8f)); + llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature)); llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED)); return sampler; @@ -798,9 +833,9 @@ static int apply_chat_template_with_error_handling(LlamaData & llama_data, const } // Helper function to handle user input -static int handle_user_input(std::string & user_input, const std::string & user_) { - if (!user_.empty()) { - user_input = user_; +static int handle_user_input(std::string & user_input, const std::string & user) { + if (!user.empty()) { + user_input = user; return 0; // No need for interactive input } @@ -832,17 +867,17 @@ static bool is_stdout_a_terminal() { } // Function to tokenize the prompt -static int chat_loop(LlamaData & llama_data, const std::string & user_) { +static int chat_loop(LlamaData & llama_data, const std::string & user) { int prev_len = 0; llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get())); static const bool stdout_a_terminal = is_stdout_a_terminal(); while (true) { // Get user input std::string user_input; - while (handle_user_input(user_input, user_)) { + while (handle_user_input(user_input, user)) { } - add_message("user", user_.empty() ? user_input : user_, llama_data); + add_message("user", user.empty() ? 
user_input : user, llama_data); int new_len; if (apply_chat_template_with_error_handling(llama_data, true, new_len) < 0) { return 1; @@ -854,7 +889,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user_) { return 1; } - if (!user_.empty()) { + if (!user.empty()) { break; } @@ -869,7 +904,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user_) { static void log_callback(const enum ggml_log_level level, const char * text, void * p) { const Opt * opt = static_cast(p); - if (opt->verbose_ || level == GGML_LOG_LEVEL_ERROR) { + if (opt->verbose || level == GGML_LOG_LEVEL_ERROR) { printe("%s", text); } } @@ -890,11 +925,11 @@ int main(int argc, const char ** argv) { } if (!is_stdin_a_terminal()) { - if (!opt.user_.empty()) { - opt.user_ += "\n\n"; + if (!opt.user.empty()) { + opt.user += "\n\n"; } - opt.user_ += read_pipe_data(); + opt.user += read_pipe_data(); } llama_log_set(log_callback, &opt); @@ -903,7 +938,7 @@ int main(int argc, const char ** argv) { return 1; } - if (chat_loop(llama_data, opt.user_)) { + if (chat_loop(llama_data, opt.user)) { return 1; } From 6f0c9e034bb398915a6617ee4acc62adb87d387d Mon Sep 17 00:00:00 2001 From: ymcki <84055651+ymcki@users.noreply.github.com> Date: Mon, 23 Dec 2024 08:22:33 +0800 Subject: [PATCH 06/81] llama : support for Llama-3_1-Nemotron-51B (#10669) * conflict resolution * move comments after bracket to its own line --- convert_hf_to_gguf.py | 178 ++++++++++++++++++++++ gguf-py/gguf/constants.py | 26 ++++ gguf-py/gguf/tensor_mapping.py | 1 + src/llama.cpp | 267 ++++++++++++++++++++++++++++++++- 4 files changed, 471 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 66aa7f5b1..d95fb1296 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1692,6 +1692,184 @@ class LlamaModel(Model): raise ValueError(f"Unprocessed experts: {experts}") +@Model.register("DeciLMForCausalLM") +class DeciModel(Model): + model_arch = gguf.MODEL_ARCH.DECI + + @staticmethod + def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int: + # DeciLM-specific code + intermediate_size = int(2 * ffn_mult * n_embd / 3) + return DeciModel._find_multiple(intermediate_size, 256) + + @staticmethod + def _find_multiple(n: int, k: int) -> int: + # DeciLM-specific code + if n % k == 0: + return n + return n + k - (n % k) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B + _block_configs: list[dict[str,Any]] = self.hparams["block_configs"] + assert self.block_count == len(_block_configs) + self._num_kv_heads = list() + self._num_heads = list() + _ffn_multipliers = list() + # ***linear attention layer*** + # if n_heads_in_group is None and replace_with_linear is True + # then _num_kv_heads[il] is 0 and _num_heads[il] is num_attention_heads + # ***attention-free layer*** + # if n_heads_in_group is None and replace_with_linear is False + # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 + # ***normal attention-layer*** + # if n_heads_in_group is not None, then + # _num_kv_heads[il] is num_attention_head // n_heads_in_group and + # _num_heads[il] is num_attention_head + for il in range(len(_block_configs)): + if _block_configs[il]["attention"]["n_heads_in_group"] is None: + if _block_configs[il]["attention"]["replace_with_linear"] is True: + self._num_kv_heads.append(0) + self._num_heads.append(self.hparams["num_attention_heads"]) + else: + self._num_kv_heads.append(0) + 
self._num_heads.append(0) + else: + self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"]) + self._num_heads.append(self.hparams["num_attention_heads"]) + _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"]) + assert self.block_count == len(self._num_kv_heads) + assert self.block_count == len(self._num_heads) + assert self.block_count == len(_ffn_multipliers) + assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int) + assert isinstance(self._num_heads, list) and isinstance(self._num_heads[0], int) + assert isinstance(_ffn_multipliers, list) and isinstance(_ffn_multipliers[0], float) + self._ffn_dims: list[int] = [ + DeciModel._ffn_mult_to_intermediate_size(multiplier, self.hparams["hidden_size"]) + for multiplier in _ffn_multipliers + ] + + def set_vocab(self): + # Please change tokenizer_config.json of Llama-3_1-Nemotron-51B's + # eos_token from '|eot_id|' to '|end_of_text|' + if self.hparams.get("vocab_size", 128256) == 128256: + tokens, toktypes, tokpre = self.get_vocab_base() + self.gguf_writer.add_tokenizer_model("gpt2") + self.gguf_writer.add_tokenizer_pre(tokpre) + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_types(toktypes) + + special_vocab = gguf.SpecialVocab( + self.dir_model, load_merges=True, + special_token_types = ['bos', 'eos', 'eom', 'eot'] + ) + special_vocab._set_special_token("bos", 128000) + special_vocab._set_special_token("eos", 128001) + special_vocab._set_special_token("eom", 128008) + special_vocab._set_special_token("eot", 128009) + special_vocab.add_to_gguf(self.gguf_writer) + else: + # DeciLM-7B + self._set_vocab_llama_hf() +# self._set_vocab_gpt2() + + def set_gguf_parameters(self): + if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B + assert self.block_count == len(self._num_kv_heads) + assert self.block_count == len(self._num_heads) + assert self.block_count == len(self._ffn_dims) + self.gguf_writer.add_head_count_kv(self._num_kv_heads) + self.gguf_writer.add_head_count(self._num_heads) + self.gguf_writer.add_feed_forward_length(self._ffn_dims) + self.gguf_writer.add_block_count(self.block_count) + self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + self.gguf_writer.add_key_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + self.gguf_writer.add_value_length(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + self.gguf_writer.add_file_type(self.ftype) + else: # DeciLM-7B + super().set_gguf_parameters() + if "num_key_value_heads_per_layer" in self.hparams: # DeciLM-7B + self._num_kv_heads: list[int] = self.hparams["num_key_value_heads_per_layer"] + assert self.block_count == len(self._num_kv_heads) + self.gguf_writer.add_head_count_kv(self._num_kv_heads) + hparams = self.hparams + self.gguf_writer.add_vocab_size(hparams["vocab_size"]) + + if "head_dim" in hparams: + rope_dim = hparams["head_dim"] + else: + rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] + self.gguf_writer.add_rope_dimension_count(rope_dim) + + if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: + if self.hparams["rope_scaling"].get("type") == "linear": + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) + 
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) + + @staticmethod + def permute(weights: Tensor, n_head: int, n_head_kv: int | None): + if n_head_kv is not None and n_head != n_head_kv: + n_head = n_head_kv + return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape)) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + n_head = self.hparams["num_attention_heads"] + if bid is not None: + if "num_key_value_heads_per_layer" in self.hparams: + n_kv_head = self.hparams["num_key_value_heads_per_layer"][bid] + elif "block_configs" in self.hparams: + n_kv_head = self._num_kv_heads[bid] + n_head = self._num_heads[bid] + else: + n_kv_head = self.hparams.get("num_key_value_heads") + else: + n_kv_head = self.hparams.get("num_key_value_heads") + + if name.endswith(("q_proj.weight", "q_proj.bias")): + data_torch = DeciModel.permute(data_torch, n_head, n_head) + if name.endswith(("k_proj.weight", "k_proj.bias")): + data_torch = DeciModel.permute(data_torch, n_head, n_kv_head) + return [(self.map_tensor_name(name), data_torch)] + + def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: + if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): + if rope_scaling.get("rope_type", '').lower() == "llama3": + base = self.hparams.get("rope_theta", 10000.0) + dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) + + factor = rope_scaling.get("factor", 8.0) + low_freq_factor = rope_scaling.get("low_freq_factor", 1.0) + high_freq_factor = rope_scaling.get("high_freq_factor", 4.0) + old_context_len = self.hparams.get("original_max_position_embeddings", 8192) + + low_freq_wavelen = old_context_len / low_freq_factor + high_freq_wavelen = old_context_len / high_freq_factor + assert low_freq_wavelen != high_freq_wavelen + + rope_factors = [] + for freq in freqs: + wavelen = 2 * math.pi / freq + if wavelen < high_freq_wavelen: + rope_factors.append(1) + elif wavelen > low_freq_wavelen: + rope_factors.append(factor) + else: + smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor) + rope_factors.append(1 / ((1 - smooth) / factor + smooth)) + + yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), torch.tensor(rope_factors, dtype=torch.float32)) + + def prepare_tensors(self): + super().prepare_tensors() + + @Model.register("BitnetForCausalLM") class BitnetModel(Model): model_arch = gguf.MODEL_ARCH.BITNET diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index a40df974d..273370370 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -221,6 +221,7 @@ class GGUFType: class MODEL_ARCH(IntEnum): LLAMA = auto() + DECI = auto() FALCON = auto() BAICHUAN = auto() GROK = auto() @@ -402,6 +403,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.LLAMA: "llama", + MODEL_ARCH.DECI: "deci", MODEL_ARCH.FALCON: "falcon", MODEL_ARCH.BAICHUAN: "baichuan", MODEL_ARCH.GROK: "grok", @@ -602,6 +604,26 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_DOWN_EXP, MODEL_TENSOR.FFN_UP_EXP, ], + MODEL_ARCH.DECI: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + 
MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], MODEL_ARCH.GROK: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, @@ -1448,6 +1470,10 @@ MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.ROPE_FREQS, MODEL_TENSOR.ATTN_ROT_EMBD, ], + MODEL_ARCH.DECI: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], MODEL_ARCH.BAICHUAN: [ MODEL_TENSOR.ROPE_FREQS, MODEL_TENSOR.ATTN_ROT_EMBD, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 82cdb121a..7009a11d4 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -198,6 +198,7 @@ class TensorNameMap: "transformer.h.{bid}.self_attention.dense", # falcon "h.{bid}.self_attention.dense", # bloom "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.linear_attn", # deci "layers.{bid}.attention.wo", # llama-pth "encoder.layer.{bid}.attention.output.dense", # bert "transformer.h.{bid}.attn.out_proj", # gpt-j diff --git a/src/llama.cpp b/src/llama.cpp index 115ef9080..c1524d06b 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -146,6 +146,7 @@ static std::string format(const char * fmt, ...) { enum llm_arch { LLM_ARCH_LLAMA, + LLM_ARCH_DECI, LLM_ARCH_FALCON, LLM_ARCH_BAICHUAN, LLM_ARCH_GROK, @@ -203,6 +204,7 @@ enum llm_arch { static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_LLAMA, "llama" }, + { LLM_ARCH_DECI, "deci" }, { LLM_ARCH_FALCON, "falcon" }, { LLM_ARCH_GROK, "grok" }, { LLM_ARCH_GPT2, "gpt2" }, @@ -674,6 +676,32 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_DECI, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, { LLM_ARCH_BAICHUAN, { @@ -5694,7 +5722,7 @@ static void llm_load_hparams( ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false); - if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) { + if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) { if (hparams.n_rot != hparams.n_embd_head_k) { throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k)); } @@ -5734,6 +5762,15 @@ static void llm_load_hparams( } } } break; + case LLM_ARCH_DECI: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) 
{ + case 32: model.type = e_model::MODEL_7B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; case LLM_ARCH_MINICPM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -7939,6 +7976,68 @@ static bool llm_load_tensors( } } } break; + case LLM_ARCH_DECI: + { + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (model.output == NULL) { + model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(i); + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(i); + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(i); + const int64_t n_ff = hparams.n_ff(i); + const int64_t n_head = hparams.n_head(i); + const int64_t n_head_kv = hparams.n_head_kv(i); + + if (n_head_kv == 0 && n_head > 0) { + // linear attention for DeciLMCausalModel + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + } + else if (n_head_kv > 0) { + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + } + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) { + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + } + else { + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? 
llama_model_loader::TENSOR_DUPLICATED : 0)); + } + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + + // optional MLP bias + layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED); + } + } break; case LLM_ARCH_MINICPM3: { const int64_t n_embd_head_qk_rope = hparams.n_rot; @@ -11308,6 +11407,167 @@ struct llm_build_context { return gf; } + struct ggml_cgraph * build_deci() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); + + // mutable variable, needed during the last layer of the computation to skip unused tokens + int32_t n_tokens = this->n_tokens; + + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = build_inp_pos(); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + const int64_t n_head_kv = hparams.n_head_kv(il); + const int64_t n_head = hparams.n_head(il); + + if (n_head == 0) { + // attention-free layer of Llama-3_1-Nemotron-51B + cur = inpL; + } else { + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); + } + + if (n_head > 0 && n_head_kv == 0) { + // "linear attention" of Llama-3_1-Nemotron-51B + cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur); + cb(cur, "wo", il); + } else if (n_head > 0) { + // self-attention + // rope freq factors for llama3; may return nullptr for llama2 and other models + struct ggml_tensor * rope_factors = build_rope_factors(il); + + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, 
Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = llm_build_kv(ctx0, lctx, kv_self, gf, + model.layers[il].wo, model.layers[il].bo, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + n_tokens = n_outputs; + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // For Granite architecture + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + + // modified to support attention-free layer of Llama-3_1-Nemotron-51B + struct ggml_tensor * ffn_inp = cur; + if (n_head > 0) { + ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + } + + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, lctx, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } + + // For Granite architecture + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = lctx.cvec.apply_to(ctx0, cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + + // For Granite architecture + if (hparams.f_logit_scale) { + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); + } + + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } + struct ggml_cgraph * build_baichuan() { struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); @@ -17422,6 +17682,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_llama(); } break; + case LLM_ARCH_DECI: + { + result = llm.build_deci(); + } break; case LLM_ARCH_BAICHUAN: { result = llm.build_baichuan(); @@ -20797,6 +21061,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { // use what we call a normal RoPE, operating on pairs of consecutive head values case LLM_ARCH_LLAMA: + case LLM_ARCH_DECI: case LLM_ARCH_BAICHUAN: case LLM_ARCH_STARCODER: case LLM_ARCH_PLAMO: From b92a14a841fb4dfaf27b29d982ec8ba5289a3bff Mon Sep 17 00:00:00 2001 From: Yun Dou Date: Mon, 23 Dec 2024 08:35:44 +0800 Subject: [PATCH 07/81] llama : support InfiniAI Megrez 3b (#10893) * Support InfiniAI Megrez 3b * Fix tokenizer_clean_spaces for megrez --- convert_hf_to_gguf.py | 3 +++ convert_hf_to_gguf_update.py | 1 + src/llama.cpp | 17 +++++++++++++++++ tests/test-chat-template.cpp | 4 ++++ 4 files changed, 25 insertions(+) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index d95fb1296..b6c15da94 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -684,6 +684,9 @@ class Model: if chkhsh == 
"ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb": # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct res = "gigachat" + if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1": + # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct + res = "megrez" if res is None: logger.warning("\n") diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 2ba346640..fea23ddb4 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -106,6 +106,7 @@ models = [ {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", }, {"name": "roberta-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"}, {"name": "gigachat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"}, + {"name": "megrez", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"}, ] diff --git a/src/llama.cpp b/src/llama.cpp index c1524d06b..4d41602fe 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -1720,6 +1720,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_RWKV_WORLD, LLM_CHAT_TEMPLATE_GRANITE, LLM_CHAT_TEMPLATE_GIGACHAT, + LLM_CHAT_TEMPLATE_MEGREZ, LLM_CHAT_TEMPLATE_UNKNOWN, }; @@ -1753,6 +1754,7 @@ static const std::map LLM_CHAT_TEMPLATES = { { "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD }, { "granite", LLM_CHAT_TEMPLATE_GRANITE }, { "gigachat", LLM_CHAT_TEMPLATE_GIGACHAT }, + { "megrez", LLM_CHAT_TEMPLATE_MEGREZ }, }; static llm_arch llm_arch_from_string(const std::string & name) { @@ -6703,6 +6705,9 @@ static void llm_load_vocab( } else if ( tokenizer_pre == "minerva-7b") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA; + } else if ( + tokenizer_pre == "megrez") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; } else { throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); } @@ -22931,6 +22936,8 @@ static llm_chat_template llama_chat_detect_template(const std::string & tmpl) { return LLM_CHAT_TEMPLATE_GRANITE; } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) { return LLM_CHAT_TEMPLATE_GIGACHAT; + } else if (tmpl_contains("<|role_start|>")) { + return LLM_CHAT_TEMPLATE_MEGREZ; } return LLM_CHAT_TEMPLATE_UNKNOWN; } @@ -23289,6 +23296,16 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "assistant<|role_sep|>"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) { + // Megrez template + for (auto message : chat) { + std::string role(message->role); + ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>"; + } + + if (add_ass) { + ss << "<|role_start|>assistant<|role_end|>"; + } } else { // template not supported return -1; diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index 30a910ad5..51bfb155b 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -77,6 +77,8 @@ int main(void) { "{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + '[/INST]' }}{% elif message['role'] == 'system' %}{{ '[SYSTEM_PROMPT] ' + message['content'] + '[/SYSTEM_PROMPT]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + eos_token }}{% else %}{{ raise_exception('Only user, system and assistant roles are supported!') }}{% endif %}{% endfor %}", // 
ai-sage/GigaChat-20B-A3B-instruct "{% if messages[0]['role'] == 'system' -%}\n {%- set loop_messages = messages[1:] -%}\n {%- set system_message = bos_token + messages[0]['content'] + additional_special_tokens[1] -%}\n{%- else -%}\n {%- set loop_messages = messages -%}\n {%- set system_message = bos_token + '' -%}\n{%- endif -%}\n{%- for message in loop_messages %}\n {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {% endif %}\n \n {%- if loop.index0 == 0 -%}\n {{ system_message -}}\n {%- endif -%}\n {%- if message['role'] == 'user' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {{ 'available functions' + additional_special_tokens[0] + additional_special_tokens[2] + additional_special_tokens[3] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if message['role'] == 'assistant' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if loop.last and add_generation_prompt -%}\n {{ 'assistant' + additional_special_tokens[0] -}}\n {%- endif -%}\n{%- endfor %}", + // Infinigence/Megrez-3B-Instruct + u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}" }; std::vector expected_output = { // teknium/OpenHermes-2.5-Mistral-7B @@ -133,6 +135,8 @@ int main(void) { "[SYSTEM_PROMPT] You are a helpful assistant[/SYSTEM_PROMPT][INST] Hello[/INST] Hi there[INST] Who are you[/INST] I am an assistant [INST] Another question[/INST]", // ai-sage/GigaChat-20B-A3B-instruct "You are a helpful assistant<|message_sep|>user<|role_sep|>Hello<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>Hi there<|message_sep|>user<|role_sep|>Who are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|> I am an assistant <|message_sep|>user<|role_sep|>Another question<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>", + // Infinigence/Megrez-3B-Instruct + "<|role_start|>system<|role_end|>You are a helpful assistant<|turn_end|><|role_start|>user<|role_end|>Hello<|turn_end|><|role_start|>assistant<|role_end|>Hi there<|turn_end|><|role_start|>user<|role_end|>Who are you<|turn_end|><|role_start|>assistant<|role_end|> I am an assistant <|turn_end|><|role_start|>user<|role_end|>Another question<|turn_end|><|role_start|>assistant<|role_end|>", }; std::vector formatted_chat(1024); int32_t res; From 86bf31cfe684849157f0875b4f0ebccac7034547 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Mon, 23 Dec 2024 10:39:30 +0200 Subject: [PATCH 08/81] rpc-server : add support for the SYCL backend (#10934) --- examples/rpc/rpc-server.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 5fe70dac7..8b1b23eda 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -12,6 +12,10 @@ #include "ggml-vulkan.h" #endif +#ifdef GGML_USE_SYCL +#include "ggml-sycl.h" +#endif + #include "ggml-rpc.h" #ifdef _WIN32 # include @@ -91,6 +95,12 @@ static ggml_backend_t 
create_backend() { if (!backend) { fprintf(stderr, "%s: ggml_backend_vulkan_init() failed\n", __func__); } +#elif GGML_USE_SYCL + fprintf(stderr, "%s: using SYCL backend\n", __func__); + backend = ggml_backend_sycl_init(0); // init device 0 + if (!backend) { + fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__); + } #endif // if there aren't GPU Backends fallback to CPU backend @@ -106,6 +116,8 @@ static void get_backend_memory(size_t * free_mem, size_t * total_mem) { ggml_backend_cuda_get_device_memory(0, free_mem, total_mem); #elif GGML_USE_VULKAN ggml_backend_vk_get_device_memory(0, free_mem, total_mem); +#elif GGML_USE_SYCL + ggml_backend_sycl_get_device_memory(0, free_mem, total_mem); #else #ifdef _WIN32 MEMORYSTATUSEX status; From 485dc01214f266afff7004bc702498b491abc404 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 23 Dec 2024 12:02:44 +0100 Subject: [PATCH 09/81] server : add system_fingerprint to chat/completion (#10917) * server : add system_fingerprint to chat/completion * update README --- examples/server/README.md | 3 +- examples/server/server.cpp | 32 +++++++++++-------- .../server/tests/unit/test_chat_completion.py | 3 ++ examples/server/utils.hpp | 2 ++ 4 files changed, 25 insertions(+), 15 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index 6d6465692..5e3d6a6e6 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -724,7 +724,8 @@ This endpoint is public (no API key check). By default, it is read-only. To make }, "total_slots": 1, "model_path": "../models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", - "chat_template": "..." + "chat_template": "...", + "build_info": "b(build number)-(build commit hash)" } ``` diff --git a/examples/server/server.cpp b/examples/server/server.cpp index fa3682a92..c571ed3c1 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -595,10 +595,11 @@ struct server_task_result_cmpl_final : server_task_result { std::time_t t = std::time(0); json res = json { - {"choices", json::array({choice})}, - {"created", t}, - {"model", oaicompat_model}, - {"object", "chat.completion"}, + {"choices", json::array({choice})}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion"}, {"usage", json { {"completion_tokens", n_decoded}, {"prompt_tokens", n_prompt_tokens}, @@ -632,11 +633,12 @@ struct server_task_result_cmpl_final : server_task_result { }; json ret = json { - {"choices", json::array({choice})}, - {"created", t}, - {"id", oaicompat_cmpl_id}, - {"model", oaicompat_model}, - {"object", "chat.completion.chunk"}, + {"choices", json::array({choice})}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion.chunk"}, {"usage", json { {"completion_tokens", n_decoded}, {"prompt_tokens", n_prompt_tokens}, @@ -761,11 +763,12 @@ struct server_task_result_cmpl_partial : server_task_result { } json ret = json { - {"choices", choices}, - {"created", t}, - {"id", oaicompat_cmpl_id}, - {"model", oaicompat_model}, - {"object", "chat.completion.chunk"} + {"choices", choices}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "chat.completion.chunk"} }; if (timings.prompt_n >= 0) { @@ -3476,6 +3479,7 @@ int main(int argc, char ** argv) { { "total_slots", ctx_server.params_base.n_parallel }, { "model_path", ctx_server.params_base.model }, { 
"chat_template", llama_get_chat_template(ctx_server.model) }, + { "build_info", build_info }, }; res_ok(res, data); diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 0fa1a17c1..885497081 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -31,6 +31,7 @@ def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_conte }) assert res.status_code == 200 assert "cmpl" in res.body["id"] # make sure the completion id has the expected format + assert res.body["system_fingerprint"].startswith("b") assert res.body["model"] == model if model is not None else server.model_alias assert res.body["usage"]["prompt_tokens"] == n_prompt assert res.body["usage"]["completion_tokens"] == n_predicted @@ -63,6 +64,7 @@ def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_conte last_cmpl_id = None for data in res: choice = data["choices"][0] + assert data["system_fingerprint"].startswith("b") assert "gpt-3.5" in data["model"] # DEFAULT_OAICOMPAT_MODEL, maybe changed in the future if last_cmpl_id is None: last_cmpl_id = data["id"] @@ -92,6 +94,7 @@ def test_chat_completion_with_openai_library(): seed=42, temperature=0.8, ) + assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b") assert res.choices[0].finish_reason == "length" assert res.choices[0].message.content is not None assert match_regex("(Suddenly)+", res.choices[0].message.content) diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 94bb285b6..1987acac8 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -56,6 +56,8 @@ static T json_value(const json & body, const std::string & key, const T & defaul } } +const static std::string build_info("b" + std::to_string(LLAMA_BUILD_NUMBER) + "-" + LLAMA_COMMIT); + // // tokenizer and input processing utils // From 14b699ecde8f1e9e251ebff9eca39ebc5603b83b Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 23 Dec 2024 12:52:25 +0100 Subject: [PATCH 10/81] server : fix missing model id in /model endpoint (#10957) * server : fix missing model id in /model endpoint * fix ci --- examples/server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index c571ed3c1..476a9225f 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -3701,7 +3701,7 @@ int main(int argc, char ** argv) { {"object", "list"}, {"data", { { - {"id", params.model_alias}, + {"id", params.model_alias.empty() ? 
params.model : params.model_alias}, {"object", "model"}, {"created", std::time(0)}, {"owned_by", "llamacpp"}, From 32d6ee6385b3fc908b283f509b845f757a6e7206 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Mon, 23 Dec 2024 20:25:52 +0100 Subject: [PATCH 11/81] ggml : fix const usage in SSE path (#10962) --- ggml/src/ggml-cpu/ggml-cpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 67e67a089..18d194479 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -986,7 +986,7 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { #define GGML_F16_STEP 32 #define GGML_F16_EPR 4 -static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { +static inline __m128 __sse_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; tmp[0] = GGML_FP16_TO_FP32(x[0]); @@ -997,7 +997,7 @@ static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { return _mm_loadu_ps(tmp); } -static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { +static inline void __sse_f16x4_store(ggml_fp16_t * x, __m128 y) { float arr[4]; _mm_storeu_ps(arr, y); From 3327bb0f8dea381118f8e66c18ea14db56d3b942 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Tue, 24 Dec 2024 04:05:17 +0100 Subject: [PATCH 12/81] ggml : fix arm enabled features check (#10961) --- ggml/src/ggml-cpu/CMakeLists.txt | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 12d790825..e357860a5 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -135,14 +135,20 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() # show enabled features + if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Windows") + set(FEAT_INPUT_FILE "NUL") + else() + set(FEAT_INPUT_FILE "/dev/null") + endif() + execute_process( COMMAND ${CMAKE_C_COMPILER} ${ARCH_FLAGS} -dM -E - - INPUT_FILE "/dev/null" + INPUT_FILE ${FEAT_INPUT_FILE} OUTPUT_VARIABLE ARM_FEATURE RESULT_VARIABLE ARM_FEATURE_RESULT ) if (ARM_FEATURE_RESULT) - message(FATAL_ERROR "Failed to get ARM features") + message(WARNING "Failed to get ARM features") else() foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC) string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos) From 60cfa728e27c28537657d4e627ed432508eb9537 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Tue, 24 Dec 2024 04:05:27 +0100 Subject: [PATCH 13/81] ggml : use wstring for backend search paths (#10960) ggml-ci --- ggml/src/CMakeLists.txt | 1 + ggml/src/ggml-backend-reg.cpp | 117 ++++++++++++++++++------------- ggml/src/ggml-cpu/CMakeLists.txt | 5 ++ 3 files changed, 75 insertions(+), 48 deletions(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index bf5ee5fc2..a5f7f7b5b 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -234,6 +234,7 @@ function(ggml_add_backend_library backend) # write the shared library to the output directory set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL) + add_dependencies(ggml ${backend}) else() add_library(${backend} ${ARGN}) target_link_libraries(ggml PUBLIC ${backend}) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 31ee31e39..7ddd178b5 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -66,6 +66,26 @@ #include "ggml-kompute.h" #endif +// disable 
C++17 deprecation warning for std::codecvt_utf8 +#if defined(__clang__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +static std::wstring utf8_to_utf16(const std::string & str) { + std::wstring_convert> converter; + return converter.from_bytes(str); +} + +static std::string utf16_to_utf8(const std::wstring & str) { + std::wstring_convert> converter; + return converter.to_bytes(str); +} + +#if defined(__clang__) +# pragma clang diagnostic pop +#endif + #ifdef _WIN32 using dl_handle = std::remove_pointer_t; @@ -88,11 +108,6 @@ static dl_handle * dl_load_library(const std::wstring & path) { return handle; } -static dl_handle * dl_load_library(const std::string & path) { - std::wstring_convert> converter; - return dl_load_library(converter.from_bytes(path)); -} - static void * dl_get_sym(dl_handle * handle, const char * name) { DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS); SetErrorMode(old_mode | SEM_FAILCRITICALERRORS); @@ -114,8 +129,8 @@ struct dl_handle_deleter { } }; -static void * dl_load_library(const std::string & path) { - dl_handle * handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL); +static void * dl_load_library(const std::wstring & path) { + dl_handle * handle = dlopen(utf16_to_utf8(path).c_str(), RTLD_NOW | RTLD_LOCAL); return handle; } @@ -202,11 +217,11 @@ struct ggml_backend_registry { devices.push_back(device); } - ggml_backend_reg_t load_backend(const char * path, bool silent) { + ggml_backend_reg_t load_backend(const std::wstring & path, bool silent) { dl_handle_ptr handle { dl_load_library(path) }; if (!handle) { if (!silent) { - GGML_LOG_ERROR("%s: failed to load %s\n", __func__, path); + GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(path).c_str()); } return nullptr; } @@ -214,7 +229,7 @@ struct ggml_backend_registry { auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score"); if (score_fn && score_fn() == 0) { if (!silent) { - GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, path); + GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, utf16_to_utf8(path).c_str()); } return nullptr; } @@ -222,7 +237,7 @@ struct ggml_backend_registry { auto backend_init_fn = (ggml_backend_init_t) dl_get_sym(handle.get(), "ggml_backend_init"); if (!backend_init_fn) { if (!silent) { - GGML_LOG_ERROR("%s: failed to find ggml_backend_init in %s\n", __func__, path); + GGML_LOG_ERROR("%s: failed to find ggml_backend_init in %s\n", __func__, utf16_to_utf8(path).c_str()); } return nullptr; } @@ -231,16 +246,16 @@ struct ggml_backend_registry { if (!reg || reg->api_version != GGML_BACKEND_API_VERSION) { if (!silent) { if (!reg) { - GGML_LOG_ERROR("%s: failed to initialize backend from %s: ggml_backend_init returned NULL\n", __func__, path); + GGML_LOG_ERROR("%s: failed to initialize backend from %s: ggml_backend_init returned NULL\n", __func__, utf16_to_utf8(path).c_str()); } else { GGML_LOG_ERROR("%s: failed to initialize backend from %s: incompatible API version (backend: %d, current: %d)\n", - __func__, path, reg->api_version, GGML_BACKEND_API_VERSION); + __func__, utf16_to_utf8(path).c_str(), reg->api_version, GGML_BACKEND_API_VERSION); } } return nullptr; } - GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), path); + GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), utf16_to_utf8(path).c_str()); register_backend(reg, std::move(handle)); @@ -376,14 
+391,14 @@ ggml_backend_t ggml_backend_init_best(void) { // Dynamic loading ggml_backend_reg_t ggml_backend_load(const char * path) { - return get_reg().load_backend(path, false); + return get_reg().load_backend(utf8_to_utf16(path), false); } void ggml_backend_unload(ggml_backend_reg_t reg) { get_reg().unload_backend(reg, true); } -static std::string get_executable_path() { +static std::wstring get_executable_path() { #if defined(__APPLE__) // get executable path std::vector path; @@ -401,7 +416,7 @@ static std::string get_executable_path() { if (last_slash != std::string::npos) { base_path = base_path.substr(0, last_slash); } - return base_path + "/"; + return utf8_to_utf16(base_path + "/"); #elif defined(__linux__) || defined(__FreeBSD__) std::string base_path = "."; std::vector path(1024); @@ -427,57 +442,63 @@ static std::string get_executable_path() { path.resize(path.size() * 2); } - return base_path + "/"; + return utf8_to_utf16(base_path + "/"); #elif defined(_WIN32) - std::vector path(MAX_PATH); - DWORD len = GetModuleFileNameA(NULL, path.data(), path.size()); + std::vector path(MAX_PATH); + DWORD len = GetModuleFileNameW(NULL, path.data(), path.size()); if (len == 0) { - return ""; + return {}; } - std::string base_path(path.data(), len); + std::wstring base_path(path.data(), len); // remove executable name auto last_slash = base_path.find_last_of('\\'); if (last_slash != std::string::npos) { base_path = base_path.substr(0, last_slash); } - return base_path + "\\"; + return base_path + L"\\"; +#else + return {}; #endif } -static std::string backend_filename_prefix() { +static std::wstring backend_filename_prefix() { #ifdef _WIN32 - return "ggml-"; + return L"ggml-"; #else - return "libggml-"; + return L"libggml-"; #endif } -static std::string backend_filename_suffix() { +static std::wstring backend_filename_suffix() { #ifdef _WIN32 - return ".dll"; + return L".dll"; #else - return ".so"; + return L".so"; +#endif +} + +static std::wstring path_separator() { +#ifdef _WIN32 + return L"\\"; +#else + return L"/"; #endif } static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, const char * user_search_path) { // enumerate all the files that match [lib]ggml-name-*.[so|dll] in the search paths // TODO: search system paths - std::string file_prefix = backend_filename_prefix() + name + "-"; - std::vector search_paths; + std::wstring file_prefix = backend_filename_prefix() + utf8_to_utf16(name) + L"-"; + std::vector search_paths; if (user_search_path == nullptr) { - search_paths.push_back("./"); + search_paths.push_back(L"." 
+ path_separator()); search_paths.push_back(get_executable_path()); } else { -#if defined(_WIN32) - search_paths.push_back(std::string(user_search_path) + "\\"); -#else - search_paths.push_back(std::string(user_search_path) + "/"); -#endif + search_paths.push_back(utf8_to_utf16(user_search_path) + path_separator()); } int best_score = 0; - std::string best_path; + std::wstring best_path; namespace fs = std::filesystem; for (const auto & search_path : search_paths) { @@ -487,27 +508,27 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied); for (const auto & entry : dir_it) { if (entry.is_regular_file()) { - std::string filename = entry.path().filename().string(); - std::string ext = entry.path().extension().string(); + std::wstring filename = entry.path().filename().wstring(); + std::wstring ext = entry.path().extension().wstring(); if (filename.find(file_prefix) == 0 && ext == backend_filename_suffix()) { - dl_handle_ptr handle { dl_load_library(entry.path().c_str()) }; + dl_handle_ptr handle { dl_load_library(entry.path().wstring()) }; if (!handle && !silent) { - GGML_LOG_ERROR("%s: failed to load %s\n", __func__, entry.path().string().c_str()); + GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str()); } if (handle) { auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score"); if (score_fn) { int s = score_fn(); #ifndef NDEBUG - GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, entry.path().string().c_str(), s); + GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str(), s); #endif if (s > best_score) { best_score = s; - best_path = entry.path().string(); + best_path = entry.path().wstring(); } } else { if (!silent) { - GGML_LOG_INFO("%s: failed to find ggml_backend_score in %s\n", __func__, entry.path().string().c_str()); + GGML_LOG_INFO("%s: failed to find ggml_backend_score in %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str()); } } } @@ -519,15 +540,15 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, if (best_score == 0) { // try to load the base backend for (const auto & search_path : search_paths) { - std::string path = search_path + backend_filename_prefix() + name + backend_filename_suffix(); + std::wstring path = search_path + backend_filename_prefix() + utf8_to_utf16(name) + backend_filename_suffix(); if (fs::exists(path)) { - return get_reg().load_backend(path.c_str(), silent); + return get_reg().load_backend(path, silent); } } return nullptr; } - return get_reg().load_backend(best_path.c_str(), silent); + return get_reg().load_backend(best_path, silent); } void ggml_backend_load_all() { diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index e357860a5..f0aecac1b 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -323,6 +323,11 @@ function(ggml_add_cpu_backend_variant_impl tag_name) target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS}) if (GGML_BACKEND_DL) + if (GGML_NATIVE) + # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE + message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS") + endif() + # The feature detection code is compiled as a separate target so that # it can be built without the architecture flags # Since multiple variants of 
the CPU backend may be included in the same From 30caac3a68a54de8396b21e20ba972554c587230 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 24 Dec 2024 09:44:20 +0200 Subject: [PATCH 14/81] llama : the WPM vocabs use the CLS token as BOS (#10930) * llama : the WPM vocabs use the CLS token as BOS ggml-ci * llama : add comment --- src/llama-vocab.cpp | 2 +- src/llama-vocab.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 7f2725f94..0a477d6dd 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1657,7 +1657,7 @@ bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token t } llama_token llama_token_bos_impl(const struct llama_vocab & vocab) { - return vocab.special_bos_id; + return vocab.type != LLAMA_VOCAB_TYPE_WPM ? vocab.special_bos_id : vocab.special_cls_id; } llama_token llama_token_eos_impl(const struct llama_vocab & vocab) { diff --git a/src/llama-vocab.h b/src/llama-vocab.h index 4bb16d2e4..a9b0da5ef 100644 --- a/src/llama-vocab.h +++ b/src/llama-vocab.h @@ -45,7 +45,7 @@ struct llama_vocab { id special_unk_id = 0; id special_sep_id = LLAMA_TOKEN_NULL; id special_pad_id = LLAMA_TOKEN_NULL; - id special_cls_id = LLAMA_TOKEN_NULL; + id special_cls_id = LLAMA_TOKEN_NULL; // TODO: revisit if this is really needed https://github.com/ggerganov/llama.cpp/pull/10930 id special_mask_id = LLAMA_TOKEN_NULL; id linefeed_id = 13; From 09fe2e76137dde850b13313f720e7ffa17efdefa Mon Sep 17 00:00:00 2001 From: NeverLucky <92274250+nvrxq@users.noreply.github.com> Date: Tue, 24 Dec 2024 19:39:49 +0300 Subject: [PATCH 15/81] server: allow filtering llama server response fields (#10940) * llama_server_response_fields * llama_server_response_fields_fix_issues * params fixes * fix * clarify docs * change to "response_fields" --------- Co-authored-by: Xuan Son Nguyen --- examples/server/README.md | 2 ++ examples/server/server.cpp | 6 +++- examples/server/tests/unit/test_completion.py | 34 +++++++++++++++++++ examples/server/utils.hpp | 22 ++++++++++++ 4 files changed, 63 insertions(+), 1 deletion(-) diff --git a/examples/server/README.md b/examples/server/README.md index 5e3d6a6e6..c7d91be99 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -450,6 +450,8 @@ These words will not be included in the completion, so make sure to add them to `post_sampling_probs`: Returns the probabilities of top `n_probs` tokens after applying sampling chain. +`response_fields`: A list of response fields, for example: `"response_fields": ["content", "generation_settings/n_predict"]`. If the specified field is missing, it will simply be omitted from the response without triggering an error. + **Response format** - Note: In streaming mode (`stream`), only `content`, `tokens` and `stop` will be returned until end of completion. Responses are sent using the [Server-sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html) standard. Note: the browser's `EventSource` interface cannot be used due to its lack of `POST` request support. 
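For illustration, a minimal sketch (not part of the patch) of how a client might use the new `response_fields` parameter documented in the README hunk above. It assumes a llama-server instance listening on localhost:8080; the host/port, prompt, and field list are arbitrary examples, and the Python `requests` package is used instead of the project's test helpers.

# hypothetical client-side usage of "response_fields" (sketch, not from the patch)
import requests

res = requests.post(
    "http://localhost:8080/completion",  # assumed local llama-server address
    json={
        "prompt": "I believe the meaning of life is",
        "n_predict": 32,
        # keep only these fields; missing fields are silently omitted
        "response_fields": ["content", "generation_settings/n_predict"],
    },
)
body = res.json()
print(body["content"])                        # the generated text
print(body["generation_settings/n_predict"])  # nested fields are addressed with '/'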
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 476a9225f..3fbfb13c4 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -92,6 +92,7 @@ struct slot_params { int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit std::vector antiprompt; + std::vector response_fields; bool timings_per_token = false; bool post_sampling_probs = false; bool ignore_eos = false; @@ -209,6 +210,7 @@ struct server_task { params.n_discard = json_value(data, "n_discard", defaults.n_discard); //params.t_max_prompt_ms = json_value(data, "t_max_prompt_ms", defaults.t_max_prompt_ms); // TODO: implement params.t_max_predict_ms = json_value(data, "t_max_predict_ms", defaults.t_max_predict_ms); + params.response_fields = json_value(data, "response_fields", std::vector()); params.sampling.top_k = json_value(data, "top_k", defaults.sampling.top_k); params.sampling.top_p = json_value(data, "top_p", defaults.sampling.top_p); @@ -522,6 +524,7 @@ struct server_task_result_cmpl_final : server_task_result { bool post_sampling_probs; std::vector probs_output; + std::vector response_fields; slot_params generation_params; @@ -568,7 +571,7 @@ struct server_task_result_cmpl_final : server_task_result { if (!stream && !probs_output.empty()) { res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs); } - return res; + return response_fields.empty() ? res : json_get_nested_values(response_fields, res); } json to_json_oaicompat_chat() { @@ -2066,6 +2069,7 @@ struct server_context { res->tokens = slot.generated_tokens; res->timings = slot.get_timings(); res->prompt = common_detokenize(ctx, slot.prompt_tokens, true); + res->response_fields = slot.params.response_fields; res->truncated = slot.truncated; res->n_decoded = slot.n_decoded; diff --git a/examples/server/tests/unit/test_completion.py b/examples/server/tests/unit/test_completion.py index b88d45f18..00d5ce391 100644 --- a/examples/server/tests/unit/test_completion.py +++ b/examples/server/tests/unit/test_completion.py @@ -257,6 +257,40 @@ def test_completion_parallel_slots(n_slots: int, n_requests: int): # assert match_regex(re_content, res.body["content"]) +@pytest.mark.parametrize( + "prompt,n_predict,response_fields", + [ + ("I believe the meaning of life is", 8, []), + ("I believe the meaning of life is", 32, ["content", "generation_settings/n_predict", "prompt"]), + ], +) +def test_completion_response_fields( + prompt: str, n_predict: int, response_fields: list[str] +): + global server + server.start() + res = server.make_request( + "POST", + "/completion", + data={ + "n_predict": n_predict, + "prompt": prompt, + "response_fields": response_fields, + }, + ) + assert res.status_code == 200 + assert "content" in res.body + assert len(res.body["content"]) + if len(response_fields): + assert res.body["generation_settings/n_predict"] == n_predict + assert res.body["prompt"] == " " + prompt + assert isinstance(res.body["content"], str) + assert len(res.body) == len(response_fields) + else: + assert len(res.body) + assert "generation_settings" in res.body + + def test_n_probs(): global server server.start() diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 1987acac8..043d8b528 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -90,6 +90,28 @@ static bool json_is_array_of_mixed_numbers_strings(const json & data) { return false; } +// get value by path(key1 / key2) +static json 
json_get_nested_values(const std::vector & paths, const json & js) { + json result = json::object(); + + for (const std::string & path : paths) { + json current = js; + const auto keys = string_split(path, /*separator*/ '/'); + bool valid_path = true; + for (const std::string & k : keys) { + if (valid_path && current.is_object() && current.contains(k)) { + current = current[k]; + } else { + valid_path = false; + } + } + if (valid_path) { + result[path] = current; + } + } + return result; +} + /** * this handles 2 cases: * - only string, example: "string" From 2cd43f4900ba0e34124fdcbf02a7f9df25a10a3d Mon Sep 17 00:00:00 2001 From: Djip007 <3705339+Djip007@users.noreply.github.com> Date: Tue, 24 Dec 2024 18:54:49 +0100 Subject: [PATCH 16/81] ggml : more performance with llamafile tinyblas on x86_64 (#10714) * more performance with llamafile tinyblas on x86_64. - add bf16 support - change dispatch strategy (thanks: https://github.com/ikawrakow/ik_llama.cpp/pull/71 ) - reduce memory bandwidth with a simpler, more cache-friendly tinyblas dispatch * tinyblas dynamic dispatching * sgemm: add M blocks. * - git 2.47 uses short ids of length 9. - --show-progress is not part of GNU Wget2 * remove unstable test --- examples/server/tests/unit/test_completion.py | 7 +- ggml/src/ggml-cpu/ggml-cpu.c | 8 +- ggml/src/ggml-cpu/llamafile/sgemm.cpp | 522 +++++++++--------- ggml/src/ggml-cpu/llamafile/sgemm.h | 4 +- scripts/compare-llama-bench.py | 22 +- scripts/hf.sh | 2 +- 6 files changed, 287 insertions(+), 278 deletions(-) diff --git a/examples/server/tests/unit/test_completion.py b/examples/server/tests/unit/test_completion.py index 00d5ce391..a6b215944 100644 --- a/examples/server/tests/unit/test_completion.py +++ b/examples/server/tests/unit/test_completion.py @@ -95,7 +95,7 @@ def test_consistent_result_same_seed(n_slots: int): res = server.make_request("POST", "/completion", data={ "prompt": "I believe the meaning of life is", "seed": 42, - "temperature": 1.0, + "temperature": 0.0, "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed }) if last_res is not None: @@ -120,9 +120,10 @@ def test_different_result_different_seed(n_slots: int): assert res.body["content"] != last_res.body["content"] last_res = res - +# TODO: figure out why this doesn't work with temperature = 1 +# @pytest.mark.parametrize("temperature", [0.0, 1.0]) @pytest.mark.parametrize("n_batch", [16, 32]) -@pytest.mark.parametrize("temperature", [0.0, 1.0]) +@pytest.mark.parametrize("temperature", [0.0]) def test_consistent_result_different_batch_size(n_batch: int, temperature: float): global server server.n_batch = n_batch diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 18d194479..b7fefb9dd 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -7419,14 +7419,14 @@ static void ggml_compute_forward_mul_mat( if (src1_cont) { for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) - if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type), + if (!llamafile_sgemm(params, + ne01, ne11, ne00/ggml_blck_size(src0->type), (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01/ggml_type_size(src0->type), (const char *)src1->data + i12*nb12 + i13*nb13, nb11/ggml_type_size(src1->type), (char *)dst->data + i12*nb2 + i13*nb3, nb1/ggml_type_size(dst->type), - ith, nth, src0->type, src1->type, dst->type)) @@ -7471,14 +7471,14 @@ UseGgmlGemm1:; for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) - if (!llamafile_sgemm(ne01, ne11,
ne00/ggml_blck_size(src0->type), + if (!llamafile_sgemm(params, + ne01, ne11, ne00/ggml_blck_size(src0->type), (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01/ggml_type_size(src0->type), (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, row_size/ggml_type_size(vec_dot_type), (char *)dst->data + i12*nb2 + i13*nb3, nb1/ggml_type_size(dst->type), - ith, nth, src0->type, vec_dot_type, dst->type)) diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index f80a72781..00f7f1170 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -53,6 +53,8 @@ #include "ggml-cpu-impl.h" #include "ggml-quants.h" +#include + #ifdef _MSC_VER #define NOINLINE __declspec(noinline) #else @@ -134,6 +136,16 @@ inline __m512 madd(__m512 a, __m512 b, __m512 c) { return _mm512_fmadd_ps(a, b, c); } #endif +#if defined(__AVX512BF16__) +template <> +inline __m512 madd(__m512bh a, __m512bh b, __m512 c) { + return _mm512_dpbf16_ps(c, a, b); +} +template <> +inline __m256 madd(__m256bh a, __m256bh b, __m256 c) { + return _mm256_dpbf16_ps(c, a, b); +} +#endif #endif #if defined(__ARM_FEATURE_FMA) @@ -226,6 +238,13 @@ template <> inline __m256 load(const float *p) { } #endif // __AVX__ +#if defined(__AVX2__) || defined(__AVX512F__) +template <> inline __m256 load(const ggml_bf16_t *p) { + return _mm256_castsi256_ps( + _mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)p)), 16)); +} +#endif // __AVX2__ + #if defined(__F16C__) template <> inline __m256 load(const ggml_fp16_t *p) { return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p)); @@ -239,8 +258,27 @@ template <> inline __m512 load(const float *p) { template <> inline __m512 load(const ggml_fp16_t *p) { return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)p)); } +template <> inline __m512 load(const ggml_bf16_t *p) { + return _mm512_castsi512_ps( + _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)p)), 16)); +} #endif // __AVX512F__ +#if defined(__AVX512BF16__) +template <> inline __m512bh load(const ggml_bf16_t *p) { + return (__m512bh)_mm512_loadu_ps((const float *)p); +} +template <> inline __m256bh load(const ggml_bf16_t *p) { + return (__m256bh)_mm256_loadu_ps((const float *)p); +} +template <> inline __m512bh load(const float *p) { + return _mm512_cvtne2ps_pbh(_mm512_loadu_ps(p + 16), _mm512_loadu_ps(p)); +} +template <> inline __m256bh load(const float *p) { + return _mm512_cvtneps_pbh(_mm512_loadu_ps(p)); +} +#endif + //////////////////////////////////////////////////////////////////////////////////////////////////// // CONSTANTS @@ -252,199 +290,170 @@ static const __m128i iq4nlt = _mm_loadu_si128((const __m128i *) kvalues_iq4nl); //////////////////////////////////////////////////////////////////////////////////////////////////// // FLOATING POINT MATRIX MULTIPLICATION +template +static inline int64_t BLOCK_SIZE(size_t m) { + const int64_t NB_BLOC_M = (m + M - 1) / M; + return (m % NB_BLOC_M == 0) ? m / NB_BLOC_M : (m / NB_BLOC_M) + 1; +} + +static constexpr inline int64_t BLOC_POS(int64_t ib, int64_t ibN, int64_t bloc_size) { + return ib < ibN ? 
ib * bloc_size : ibN * bloc_size + (ib - ibN) * (bloc_size - 1); +} + template class tinyBLAS { public: - tinyBLAS(int64_t k, + tinyBLAS(const ggml_compute_params * params, int64_t k, const TA *A, int64_t lda, const TB *B, int64_t ldb, - TC *C, int64_t ldc, - int ith, int nth) - : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { + TC *C, int64_t ldc) + : params(params), A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc) { } - void matmul(int64_t m, int64_t n) { - mnpack(0, m, 0, n); + bool matmul(int64_t m, int64_t n) { + if (k % KN != 0) + return false; + // compute RM for only need tile with size RM&RM-1 +#if VECTOR_REGISTERS == 32 + if (m % 16 == 0 && (m/16 >= params->nth)) { + const int64_t SIZE_N = BLOCK_SIZE<6>(n); + mnpack<4, 6, 4>(m, n, SIZE_N, 12); + return true; + } + if (m % 8 == 0 ) { + const int64_t SIZE_N = BLOCK_SIZE<6>(n); + mnpack<4, 6, 2>(m, n, SIZE_N, 12); + return true; + } + if (m % 4 == 0) { + const int64_t SIZE_N = BLOCK_SIZE<6>(n); + mnpack<4, 6, 1>(m, n, SIZE_N, 12); + return true; + } +#else // VECTOR_REGISTERS == 16 + if (m % 16 == 0 && (m/16 >= params->nth)) { + const int64_t SIZE_N = BLOCK_SIZE<3>(n); + mnpack<4, 3, 4>(m, n, SIZE_N, 24); + return true; + } + if (m % 8 == 0 ) { + const int64_t SIZE_N = BLOCK_SIZE<3>(n); + mnpack<4, 3, 2>(m, n, SIZE_N, 24); + return true; + } + if (m % 4 == 0) { + const int64_t SIZE_N = BLOCK_SIZE<3>(n); + mnpack<4, 3, 1>(m, n, SIZE_N, 24); + return true; + } +#endif + return false; } private: - NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { - int64_t mc, nc, mp, np; - switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) { -#if VECTOR_REGISTERS == 32 - case 0x55: - mc = 5; - nc = 5; - gemm<5, 5>(m0, m, n0, n); - break; - case 0x45: - mc = 4; - nc = 5; - gemm<4, 5>(m0, m, n0, n); - break; - case 0x54: - mc = 5; - nc = 4; - gemm<5, 4>(m0, m, n0, n); - break; - case 0x44: - mc = 4; - nc = 4; - gemm<4, 4>(m0, m, n0, n); - break; - case 0x53: - mc = 5; - nc = 3; - gemm<5, 3>(m0, m, n0, n); - break; - case 0x35: - mc = 3; - nc = 5; - gemm<3, 5>(m0, m, n0, n); - break; - case 0x43: - mc = 4; - nc = 3; - gemm<4, 3>(m0, m, n0, n); - break; -#else - case 0x55: - case 0x54: - case 0x53: - case 0x45: - case 0x44: - case 0x43: - mc = 4; - nc = 3; - gemm<4, 3>(m0, m, n0, n); - break; - case 0x35: -#endif - case 0x34: - mc = 3; - nc = 4; - gemm<3, 4>(m0, m, n0, n); - break; - case 0x52: - mc = 5; - nc = 2; - gemm<5, 2>(m0, m, n0, n); - break; - case 0x33: - mc = 3; - nc = 3; - gemm<3, 3>(m0, m, n0, n); - break; - case 0x25: - mc = 2; - nc = 5; - gemm<2, 5>(m0, m, n0, n); - break; - case 0x42: - mc = 4; - nc = 2; - gemm<4, 2>(m0, m, n0, n); - break; - case 0x24: - mc = 2; - nc = 4; - gemm<2, 4>(m0, m, n0, n); - break; - case 0x32: - mc = 3; - nc = 2; - gemm<3, 2>(m0, m, n0, n); - break; - case 0x23: - mc = 2; - nc = 3; - gemm<2, 3>(m0, m, n0, n); - break; - case 0x51: - mc = 5; - nc = 1; - gemm<5, 1>(m0, m, n0, n); - break; - case 0x41: - mc = 4; - nc = 1; - gemm<4, 1>(m0, m, n0, n); - break; - case 0x22: - mc = 2; - nc = 2; - gemm<2, 2>(m0, m, n0, n); - break; - case 0x15: - mc = 1; - nc = 5; - gemm<1, 5>(m0, m, n0, n); - break; - case 0x14: - mc = 1; - nc = 4; - gemm<1, 4>(m0, m, n0, n); - break; - case 0x31: - mc = 3; - nc = 1; - gemm<3, 1>(m0, m, n0, n); - break; - case 0x13: - mc = 1; - nc = 3; - gemm<1, 3>(m0, m, n0, n); - break; - case 0x21: - mc = 2; - nc = 1; - gemm<2, 1>(m0, m, n0, n); - break; - case 0x12: - mc = 1; - nc = 2; - gemm<1, 2>(m0, m, n0, n); - break; - case 0x11: - mc = 1; - nc 
= 1; - gemm<1, 1>(m0, m, n0, n); - break; - default: - return; + template + inline void mnpack(int64_t m, int64_t n, int64_t SIZE_N, int64_t BN) { + if (SIZE_N == RN) { + return gemm(m, n, BN); + } + if constexpr (RN > 1) { + return mnpack(m, n, SIZE_N, BN); + } else { + GGML_LOG_ERROR("mnpack<%d, %d> bloc size not supported\n", RM, (int)SIZE_N); + GGML_ASSERT(false); // we have miss something. } - mp = m0 + (m - m0) / mc * mc; - np = n0 + (n - n0) / nc * nc; - mnpack(mp, m, n0, np); - mnpack(m0, m, np, n); } template - NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { - int64_t ytiles = (m - m0) / RM; - int64_t xtiles = (n - n0) / RN; - int64_t tiles = xtiles * ytiles; - int64_t duty = (tiles + nth - 1) / nth; - int64_t start = duty * ith; - int64_t end = start + duty; - if (end > tiles) - end = tiles; - for (int64_t job = start; job < end; ++job) { - int64_t ii = m0 + job / xtiles * RM; - int64_t jj = n0 + job % xtiles * RN; - D Cv[RN][RM] = {}; - for (int64_t l = 0; l < k; l += KN) - for (int64_t j = 0; j < RN; ++j) - for (int64_t i = 0; i < RM; ++i) - Cv[j][i] = madd(load(A + lda * (ii + i) + l), - load(B + ldb * (jj + j) + l), - Cv[j][i]); - for (int64_t j = 0; j < RN; ++j) - for (int64_t i = 0; i < RM; ++i) - C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); + inline void gemm_bloc(int64_t ii, int64_t jj) { + D Cv[RN][RM] = {}; + for (int64_t l = 0; l < k; l += KN) { + // help compiler for op order. + if constexpr (RM <= RN) { + V Av[RM]; + for (int64_t i = 0; i < RM; ++i) { + Av[i] = load(A + lda * (ii + i) + l); + } + for (int64_t j = 0; j < RN; ++j) { + V Bv = load(B + ldb * (jj + j) + l); + for (int64_t i = 0; i < RM; ++i) { + Cv[j][i] = madd(Av[i], Bv, Cv[j][i]); + } + } + } else { + V Bv[RN]; + for (int64_t j = 0; j < RN; ++j) { + Bv[j] = load(B + ldb * (jj + j) + l); + } + for (int64_t i = 0; i < RM; ++i) { + V Av = load(A + lda * (ii + i) + l); + for (int64_t j = 0; j < RN; ++j) { + Cv[j][i] = madd(Av, Bv[j], Cv[j][i]); + } + } + } } + for (int64_t j = 0; j < RN; ++j) + for (int64_t i = 0; i < RM; ++i) + C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } + template + NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) { + static std::atomic current_chunk; + + GGML_ASSERT(m % (RM * BM) == 0); + const int64_t ytiles = m / (RM * BM); + const int64_t xtiles = (n + RN -1) / RN; + const int64_t jj_RN = (xtiles - (xtiles * RN - n)); + + // "round" bloc_size to "nearest" BN + const int64_t NB_BN = xtiles < BN ? 1 : (xtiles + BN / 2) / BN; + const int64_t SIZE_BN = xtiles % NB_BN == 0 ? xtiles / NB_BN : xtiles / NB_BN + 1; + const int64_t jj_BN = (NB_BN - (NB_BN * SIZE_BN - xtiles)); + const int64_t nb_job = ytiles * NB_BN; + + if (params->ith == 0) { + GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles); + // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. + std::atomic_store_explicit(¤t_chunk, (int64_t)params->nth, std::memory_order_relaxed); + } + + ggml_barrier(params->threadpool); + + int64_t job = params->ith; + while (job < nb_job) { + const int64_t ii = (job % ytiles) * RM * BM; + const int64_t jb = job / ytiles; + const int64_t jr0 = BLOC_POS(jb , jj_BN, SIZE_BN); + const int64_t jrN = BLOC_POS(jb+1, jj_BN, SIZE_BN); + + const int64_t jj0 = BLOC_POS(jr0, jj_RN, RN); + const int64_t jj2 = BLOC_POS(jrN, jj_RN, RN); + const int64_t jj1 = jj2 < jj_RN * RN ? 
jj2 : jj_RN * RN; + + for (int64_t bi = 0; bi < BM * RM; bi += RM) { + int64_t jj = jj0; + for (; jj < jj1; jj += RN) { + gemm_bloc(ii + bi, jj); + } + if constexpr (RN > 1) { + for (; jj < jj2; jj += RN - 1) { + gemm_bloc(ii + bi, jj); + } + } + GGML_ASSERT(jj == jj2); + } + + // next step. + job = std::atomic_fetch_add_explicit(¤t_chunk, (int64_t)1, std::memory_order_relaxed); + } + + ggml_barrier(params->threadpool); + return; + } + + const ggml_compute_params * params; const TA *const A; const TB *const B; TC *const C; @@ -452,8 +461,6 @@ class tinyBLAS { const int64_t lda; const int64_t ldb; const int64_t ldc; - const int ith; - const int nth; }; ////////////////////////////////////////////////////////////////////////////////////////// @@ -1657,8 +1664,9 @@ class tinyBLAS_PPC { * @param Ctype is GGML data type of `C` * @return true if this function was able to service the matmul request */ -bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C, - int64_t ldc, int ith, int nth, int Atype, int Btype, int Ctype) { +bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64_t n, int64_t k, + const void *A, int64_t lda, const void *B, int64_t ldb, void *C, + int64_t ldc, int Atype, int Btype, int Ctype) { assert(m >= 0); assert(n >= 0); @@ -1666,8 +1674,8 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda assert(lda >= k); assert(ldb >= k); assert(ldc >= m); - assert(nth > 0); - assert(ith < nth); + assert(params->nth > 0); + assert(params->ith < params->nth); // only enable sgemm for prompt processing if (n < 2) @@ -1682,37 +1690,25 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda if (Btype != GGML_TYPE_F32) return false; #if defined(__AVX512F__) - if (k % 16) - return false; - tinyBLAS<16, __m512, __m512, float, float, float> tb{ + tinyBLAS<16, __m512, __m512, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; + (float *)C, ldc}; + return tb.matmul(m, n); #elif defined(__AVX__) || defined(__AVX2__) - if (k % 8) - return false; - tinyBLAS<8, __m256, __m256, float, float, float> tb{ + tinyBLAS<8, __m256, __m256, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; + (float *)C, ldc}; + return tb.matmul(m, n); #elif defined(__ARM_NEON) if (n < 4) return false; - if (k % 4) - return false; - tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ + tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; + (float *)C, ldc}; + return tb.matmul(m, n); #elif defined(__MMA__) if (k % 8) return false; @@ -1720,7 +1716,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #else @@ -1728,60 +1724,71 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda #endif } + case GGML_TYPE_BF16: { +#if defined(__AVX512BF16__) + if (Btype == GGML_TYPE_BF16) { + tinyBLAS<32, __m512, __m512bh, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k, + (const ggml_bf16_t *)A, lda, + (const ggml_bf16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, 
n); + } +#elif defined(__AVX512F__) + if (Btype == GGML_TYPE_BF16) { + tinyBLAS<16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k, + (const ggml_bf16_t *)A, lda, + (const ggml_bf16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } +#elif defined(__AVX2__) + if (Btype == GGML_TYPE_BF16) { + tinyBLAS<8, __m256, __m256, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k, + (const ggml_bf16_t *)A, lda, + (const ggml_bf16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } +#endif + return false; + } case GGML_TYPE_F16: { #if defined(__AVX512F__) - if (k % 16) - return false; - if (Btype != GGML_TYPE_F32) - return false; - tinyBLAS<16, __m512, __m512, ggml_fp16_t, float, float> tb{ - k, (const ggml_fp16_t *)A, lda, - (const float *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; + if (Btype == GGML_TYPE_F16) { + tinyBLAS<16, __m512, __m512, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k, + (const ggml_fp16_t *)A, lda, + (const ggml_fp16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } #elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__) - if (k % 8) - return false; - if (Btype != GGML_TYPE_F32) - return false; - tinyBLAS<8, __m256, __m256, ggml_fp16_t, float, float> tb{ - k, (const ggml_fp16_t *)A, lda, - (const float *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; + if (Btype == GGML_TYPE_F16) { + tinyBLAS<8, __m256, __m256, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k, + (const ggml_fp16_t *)A, lda, + (const ggml_fp16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } #elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER) if (n < 8) return false; - if (k % 8) - return false; - if (Btype != GGML_TYPE_F16) - return false; - tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{ - k, (const ggml_fp16_t *)A, lda, - (const ggml_fp16_t *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; + if (Btype == GGML_TYPE_F16) { + tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params, + k, (const ggml_fp16_t *)A, lda, + (const ggml_fp16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } #elif defined(__ARM_NEON) && !defined(_MSC_VER) - if (k % 4) - return false; - if (Btype != GGML_TYPE_F32) - return false; - tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{ - k, (const ggml_fp16_t *)A, lda, - (const float *)B, ldb, - (float *)C, ldc, - ith, nth}; - tb.matmul(m, n); - return true; -#else - return false; + if (Btype == GGML_TYPE_F32) { + tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{ params, + k, (const ggml_fp16_t *)A, lda, + (const float *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } #endif + return false; } case GGML_TYPE_Q8_0: { @@ -1792,7 +1799,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__ARM_FEATURE_DOTPROD) @@ -1800,7 +1807,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #else @@ -1816,7 +1823,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, 
(float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__ARM_FEATURE_DOTPROD) @@ -1824,7 +1831,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #else @@ -1840,7 +1847,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const block_q5_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #else @@ -1856,7 +1863,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda k, (const block_iq4_nl *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, - ith, nth}; + params->ith, params->nth}; tb.matmul(m, n); return true; #else @@ -1868,6 +1875,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda return false; } + (void)params; (void)m; (void)n; (void)k; @@ -1877,8 +1885,6 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda (void)ldb; (void)C; (void)ldc; - (void)ith; - (void)nth; (void)Atype; (void)Btype; (void)Ctype; diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.h b/ggml/src/ggml-cpu/llamafile/sgemm.h index caf6dd556..3d2909515 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.h +++ b/ggml/src/ggml-cpu/llamafile/sgemm.h @@ -5,8 +5,8 @@ extern "C" { #endif -bool llamafile_sgemm(int64_t, int64_t, int64_t, const void *, int64_t, - const void *, int64_t, void *, int64_t, int, int, +bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t, int64_t, int64_t, + const void *, int64_t, const void *, int64_t, void *, int64_t, int, int, int); #ifdef __cplusplus diff --git a/scripts/compare-llama-bench.py b/scripts/compare-llama-bench.py index 5069ae638..239c458d8 100755 --- a/scripts/compare-llama-bench.py +++ b/scripts/compare-llama-bench.py @@ -126,6 +126,8 @@ connection = sqlite3.connect(input_file) cursor = connection.cursor() builds = cursor.execute("SELECT DISTINCT build_commit FROM test;").fetchall() +commit_short_len = len(builds[0][0]) + try: repo = git.Repo(".", search_parent_directories=True) except git.InvalidGitRepositoryError: @@ -138,11 +140,11 @@ def find_parent_in_data(commit: git.Commit): seen_hexsha8 = set() while heap: depth, current_commit = heapq.heappop(heap) - current_hexsha8 = commit.hexsha[:8] + current_hexsha8 = commit.hexsha[:commit_short_len] if (current_hexsha8,) in builds: return current_hexsha8 for parent in commit.parents: - parent_hexsha8 = parent.hexsha[:8] + parent_hexsha8 = parent.hexsha[:commit_short_len] if parent_hexsha8 not in seen_hexsha8: seen_hexsha8.add(parent_hexsha8) heapq.heappush(heap, (depth + 1, parent)) @@ -156,9 +158,9 @@ def get_all_parent_hexsha8s(commit: git.Commit): while unvisited: current_commit = unvisited.pop(0) - visited.append(current_commit.hexsha[:8]) + visited.append(current_commit.hexsha[:commit_short_len]) for parent in current_commit.parents: - if parent.hexsha[:8] not in visited: + if parent.hexsha[:commit_short_len] not in visited: unvisited.append(parent) return visited @@ -169,10 +171,10 @@ def get_commit_name(hexsha8): if repo is None: return hexsha8 for h in repo.heads: - if h.commit.hexsha[:8] == hexsha8: + if h.commit.hexsha[:commit_short_len] == hexsha8: return h.name for t in repo.tags: - if t.commit.hexsha[:8] == hexsha8: + if t.commit.hexsha[:commit_short_len] == hexsha8: return t.name 
return hexsha8 @@ -183,13 +185,13 @@ def get_commit_hexsha8(name): return None for h in repo.heads: if h.name == name: - return h.commit.hexsha[:8] + return h.commit.hexsha[:commit_short_len] for t in repo.tags: if t.name == name: - return t.commit.hexsha[:8] + return t.commit.hexsha[:commit_short_len] for c in repo.iter_commits("--all"): - if c.hexsha[:8] == name[:8]: - return c.hexsha[:8] + if c.hexsha[:commit_short_len] == name[:commit_short_len]: + return c.hexsha[:commit_short_len] return None diff --git a/scripts/hf.sh b/scripts/hf.sh index 85c2c4d9a..b251925fa 100755 --- a/scripts/hf.sh +++ b/scripts/hf.sh @@ -26,7 +26,7 @@ function has_cmd { } if has_cmd wget; then - cmd="wget -q --show-progress -c -O %s/%s %s" + cmd="wget -q -c -O %s/%s %s" elif has_cmd curl; then cmd="curl -C - -f --output-dir %s -o %s -L %s" else From 9ba399dfa7f115effc63d48e6860a94c9faa31b2 Mon Sep 17 00:00:00 2001 From: Reza Kakhki Date: Tue, 24 Dec 2024 21:33:04 +0100 Subject: [PATCH 17/81] server : add support for "encoding_format": "base64" to the */embeddings endpoints (#10967) * add support for base64 * fix base64 test * improve test --------- Co-authored-by: Xuan Son Nguyen --- examples/server/CMakeLists.txt | 1 + examples/server/server.cpp | 13 ++++++- examples/server/tests/unit/test_embedding.py | 41 ++++++++++++++++++++ examples/server/utils.hpp | 28 ++++++++++--- 4 files changed, 76 insertions(+), 7 deletions(-) diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt index a27597cbc..1b7cc8c13 100644 --- a/examples/server/CMakeLists.txt +++ b/examples/server/CMakeLists.txt @@ -34,6 +34,7 @@ endforeach() add_executable(${TARGET} ${TARGET_SRCS}) install(TARGETS ${TARGET} RUNTIME) +target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR}) target_link_libraries(${TARGET} PRIVATE common ${CMAKE_THREAD_LIBS_INIT}) if (LLAMA_SERVER_SSL) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 3fbfb13c4..30ff3b149 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -3790,6 +3790,17 @@ int main(int argc, char ** argv) { return; } + bool use_base64 = false; + if (body.count("encoding_format") != 0) { + const std::string& format = body.at("encoding_format"); + if (format == "base64") { + use_base64 = true; + } else if (format != "float") { + res_error(res, format_error_response("The format to return the embeddings in. Can be either float or base64", ERROR_TYPE_INVALID_REQUEST)); + return; + } + } + std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, true, true); for (const auto & tokens : tokenized_prompts) { // this check is necessary for models that do not add BOS token to the input @@ -3841,7 +3852,7 @@ int main(int argc, char ** argv) { } // write JSON response - json root = oaicompat ? format_embeddings_response_oaicompat(body, responses) : json(responses); + json root = oaicompat ? 
format_embeddings_response_oaicompat(body, responses, use_base64) : json(responses); res_ok(res, root); }; diff --git a/examples/server/tests/unit/test_embedding.py b/examples/server/tests/unit/test_embedding.py index 43e372fc7..8b0eb42b0 100644 --- a/examples/server/tests/unit/test_embedding.py +++ b/examples/server/tests/unit/test_embedding.py @@ -1,3 +1,5 @@ +import base64 +import struct import pytest from openai import OpenAI from utils import * @@ -194,3 +196,42 @@ def test_embedding_usage_multiple(): assert res.status_code == 200 assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens'] assert res.body['usage']['prompt_tokens'] == 2 * 9 + + +def test_embedding_openai_library_base64(): + server.start() + test_input = "Test base64 embedding output" + + # get embedding in default format + res = server.make_request("POST", "/v1/embeddings", data={ + "input": test_input + }) + assert res.status_code == 200 + vec0 = res.body["data"][0]["embedding"] + + # get embedding in base64 format + res = server.make_request("POST", "/v1/embeddings", data={ + "input": test_input, + "encoding_format": "base64" + }) + + assert res.status_code == 200 + assert "data" in res.body + assert len(res.body["data"]) == 1 + + embedding_data = res.body["data"][0] + assert "embedding" in embedding_data + assert isinstance(embedding_data["embedding"], str) + + # Verify embedding is valid base64 + decoded = base64.b64decode(embedding_data["embedding"]) + # Verify decoded data can be converted back to float array + float_count = len(decoded) // 4 # 4 bytes per float + floats = struct.unpack(f'{float_count}f', decoded) + assert len(floats) > 0 + assert all(isinstance(x, float) for x in floats) + assert len(floats) == len(vec0) + + # make sure the decoded data is the same as the original + for x, y in zip(floats, vec0): + assert abs(x - y) < EPSILON diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 043d8b528..334f2f192 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -3,6 +3,7 @@ #include "common.h" #include "log.h" #include "llama.h" +#include "common/base64.hpp" #ifndef NDEBUG // crash the server in debug mode, otherwise send an http 500 error @@ -613,16 +614,31 @@ static json oaicompat_completion_params_parse( return llama_params; } -static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) { +static json format_embeddings_response_oaicompat(const json & request, const json & embeddings, bool use_base64 = false) { json data = json::array(); int32_t n_tokens = 0; int i = 0; for (const auto & elem : embeddings) { - data.push_back(json{ - {"embedding", json_value(elem, "embedding", json::array())}, - {"index", i++}, - {"object", "embedding"} - }); + json embedding_obj; + + if (use_base64) { + const auto& vec = json_value(elem, "embedding", json::array()).get>(); + const char* data_ptr = reinterpret_cast(vec.data()); + size_t data_size = vec.size() * sizeof(float); + embedding_obj = { + {"embedding", base64::encode(data_ptr, data_size)}, + {"index", i++}, + {"object", "embedding"}, + {"encoding_format", "base64"} + }; + } else { + embedding_obj = { + {"embedding", json_value(elem, "embedding", json::array())}, + {"index", i++}, + {"object", "embedding"} + }; + } + data.push_back(embedding_obj); n_tokens += json_value(elem, "tokens_evaluated", 0); } From d283d02bf254a7f2991e1502066330cc0d4321a6 Mon Sep 17 00:00:00 2001 From: Peter Date: Fri, 27 Dec 2024 00:59:11 +1100 Subject: [PATCH 18/81] examples, ggml : fix GCC 
compiler warnings (#10983) Warning types fixed (observed under MSYS2 GCC 14.2.0): * format '%ld' expects argument of type 'long int', but argument has type 'size_t' * llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp:81:46: warning: missing initializer for member '_STARTUPINFOA::lpDesktop' [-Wmissing-field-initializers] (emitted for all struct field except first) --- examples/cvector-generator/mean.hpp | 2 +- examples/cvector-generator/pca.hpp | 2 +- examples/export-lora/export-lora.cpp | 6 +++--- ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp | 3 ++- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/examples/cvector-generator/mean.hpp b/examples/cvector-generator/mean.hpp index 16be5ce3e..4eeac1eeb 100644 --- a/examples/cvector-generator/mean.hpp +++ b/examples/cvector-generator/mean.hpp @@ -15,7 +15,7 @@ static void run( for (size_t il = 0; il < v_input.size(); ++il) { // prepare output vector struct ggml_tensor * ctrl_out = v_output[il]; - ggml_format_name(ctrl_out, "direction.%ld", il+1); + ggml_format_name(ctrl_out, "direction.%zu", il+1); // calculate mean vector struct ggml_tensor * t_layer = v_input[il]; diff --git a/examples/cvector-generator/pca.hpp b/examples/cvector-generator/pca.hpp index f6e307fbc..e88bbdde9 100644 --- a/examples/cvector-generator/pca.hpp +++ b/examples/cvector-generator/pca.hpp @@ -302,7 +302,7 @@ static void run_pca( // prepare output vector struct ggml_tensor * ctrl_out = v_output[il]; - ggml_format_name(ctrl_out, "direction.%ld", il+1); + ggml_format_name(ctrl_out, "direction.%zu", il+1); // run power_iteration params.i_layer = il; diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index 67662313d..058b5cc86 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -265,8 +265,8 @@ struct lora_merge_ctx { fout.write((const char *)data.data(), data.size()); } - printf("%s : merged %ld tensors with lora adapters\n", __func__, n_merged); - printf("%s : wrote %ld tensors to output file\n", __func__, trans.size()); + printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged); + printf("%s : wrote %zu tensors to output file\n", __func__, trans.size()); } void copy_tensor(struct ggml_tensor * base) { @@ -352,7 +352,7 @@ struct lora_merge_ctx { const float scale = alpha ? 
adapters[i]->scale * alpha / rank : adapters[i]->scale; delta = ggml_scale(ctx0, delta, scale); cur = ggml_add(ctx0, delta, cur); - printf("%s : + merging from adapter[%ld] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type)); + printf("%s : + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type)); printf("%s : input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]); } cur = ggml_cast(ctx0, cur, out->type); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 7a0d7285d..8111c0638 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -78,7 +78,8 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s } PROCESS_INFORMATION pi; - STARTUPINFOA si = { sizeof(STARTUPINFOA) }; + STARTUPINFOA si = {}; + si.cb = sizeof(STARTUPINFOA); si.dwFlags = STARTF_USESTDHANDLES; si.hStdOutput = stdout_write; si.hStdError = stderr_write; From d79d8f39b4da6deca4aea8bf130c6034c482b320 Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Thu, 26 Dec 2024 10:54:44 -0500 Subject: [PATCH 19/81] vulkan: multi-row k quants (#10846) * multi row k quant shaders! * better row selection * more row choices * readjust row selection * rm_kq=2 by default --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 81 ++++--- .../vulkan-shaders/mul_mat_vec_q2_k.comp | 126 ++++++---- .../vulkan-shaders/mul_mat_vec_q3_k.comp | 102 ++++---- .../vulkan-shaders/mul_mat_vec_q4_k.comp | 174 ++++++++------ .../vulkan-shaders/mul_mat_vec_q5_k.comp | 226 ++++++++++-------- .../vulkan-shaders/mul_mat_vec_q6_k.comp | 130 +++++----- 6 files changed, 472 insertions(+), 367 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 323ce7cf3..c0a43631c 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1855,53 +1855,58 @@ static void ggml_vk_load_shaders(vk_device& device) { // mul mat vec - // AMD GCN and Intel graphics cards perform best when the number of rows per shader is doubled - uint32_t rm = 1; - if ((device->vendor_id == VK_VENDOR_ID_AMD && device->subgroup_min_size == 64 && device->subgroup_max_size == 64) || device->vendor_id == VK_VENDOR_ID_INTEL) - rm = 2; + // the number of rows computed per shader depends on GPU model and quant + uint32_t rm_stdq = 1; + uint32_t rm_kq = 2; + if (device->vendor_id == VK_VENDOR_ID_AMD) { + if (device->subgroup_min_size == 64 && device->subgroup_max_size == 64) { // GCN + rm_stdq = 2; + rm_kq = 4; + } + } else if (device->vendor_id == VK_VENDOR_ID_INTEL) + rm_stdq = 2; - // computing additional rows per workgroup is a benefit for Q4_0 -> Q5_1, but not for Q8_0. 
ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm, 1, 1}, {device->subgroup_size, 1*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, 
sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {subgroup_size_16, 2*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); 
ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm, 1, 1}, {device->subgroup_size, 1*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {subgroup_size_16, 2*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, 
sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - 
ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1*rm, 1, 1}, {device->subgroup_size, 1*rm}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {subgroup_size_16, 2*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, 
sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); // dequant shaders ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp index 1a5350d99..138ad0184 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp @@ -6,21 +6,15 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; -shared FLOAT_TYPE tmp[BLOCK_SIZE]; - -void main() { - const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; - - if (row >= p.stride_d) { - return; - } 
+shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; +void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; - const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; // 16 threads are used to process each block const uint it_size = gl_WorkGroupSize.x/16; @@ -38,15 +32,15 @@ void main() { const uint s_offset = 8*v_im; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp[NUM_ROWS]; + + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[i] = FLOAT_TYPE(0); + } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - f16vec2 d = data_a[ib0 + i].d; - const FLOAT_TYPE dall = d.x; - const FLOAT_TYPE dmin = d.y; - B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; @@ -56,58 +50,84 @@ void main() { B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; - uint32_t s0_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 0]; - uint32_t s4_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 1]; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + f16vec2 d = data_a[ib0 + i].d; + const FLOAT_TYPE dall = d.x; + const FLOAT_TYPE dmin = d.y; - uint32_t s0_lo4_u32 = s0_u32 & 0x0F0F0F0F; - uint32_t s0_hi4_u32 = (s0_u32 >> 4) & 0x0F0F0F0F; - uint32_t s4_lo4_u32 = s4_u32 & 0x0F0F0F0F; - uint32_t s4_hi4_u32 = (s4_u32 >> 4) & 0x0F0F0F0F; + uint32_t s0_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 0]; + uint32_t s4_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 1]; - uvec4 s0_lo4 = uvec4(unpack8(s0_lo4_u32)); - uvec4 s4_lo4 = uvec4(unpack8(s4_lo4_u32)); - uvec4 s0_hi4 = uvec4(unpack8(s0_hi4_u32)); - uvec4 s4_hi4 = uvec4(unpack8(s4_hi4_u32)); + uint32_t s0_lo4_u32 = s0_u32 & 0x0F0F0F0F; + uint32_t s0_hi4_u32 = (s0_u32 >> 4) & 0x0F0F0F0F; + uint32_t s4_lo4_u32 = s4_u32 & 0x0F0F0F0F; + uint32_t s4_hi4_u32 = (s4_u32 >> 4) & 0x0F0F0F0F; - uint16_t qs0_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 0]; - uint16_t qs16_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]; - uvec2 qs0 = uvec2(unpack8(qs0_u16)); - uvec2 qs16 = uvec2(unpack8(qs16_u16)); + uvec4 s0_lo4 = uvec4(unpack8(s0_lo4_u32)); + uvec4 s4_lo4 = uvec4(unpack8(s4_lo4_u32)); + uvec4 s0_hi4 = uvec4(unpack8(s0_hi4_u32)); + uvec4 s4_hi4 = uvec4(unpack8(s4_hi4_u32)); - FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); - FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 2; ++l) { - sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), - fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), - fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), - fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), - fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), - fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), - fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3), - fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); - sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), - 
fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), - fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), - fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), - fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), - fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), - fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), - fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); + uint16_t qs0_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 0]; + uint16_t qs16_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]; + uvec2 qs0 = uvec2(unpack8(qs0_u16)); + uvec2 qs16 = uvec2(unpack8(qs16_u16)); + + FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); + FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); + sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); + } + temp[n] = fma(dall, sum1, fma(-dmin, sum2, temp[n])); } - temp = fma(dall, sum1, fma(-dmin, sum2, temp)); } - tmp[gl_LocalInvocationID.x] = temp; - // sum up partial sums and write back result + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] = temp[n]; + } barrier(); - [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { if (tid < s) { - tmp[tid] += tmp[tid + s]; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] += tmpsh[n][tid + s]; + } } barrier(); } if (tid == 0) { - data_d[d_offset + row] = D_TYPE(tmp[0]); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); + } + } +} + +void main() { + const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); + + // do NUM_ROWS at a time, unless there aren't enough remaining rows + if (first_row + NUM_ROWS <= p.stride_d) { + compute_outputs(first_row, NUM_ROWS); + } else { + if (first_row >= p.stride_d) { + return; + } + compute_outputs(first_row, p.stride_d - first_row); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp index b19c38111..82ec42d25 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp @@ -6,21 +6,15 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; -shared FLOAT_TYPE tmp[BLOCK_SIZE]; - -void main() { - const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; - - if (row >= p.stride_d) { - 
return; - } +shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; +void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; - const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; // 16 threads are used to process each block const uint it_size = gl_WorkGroupSize.x/16; @@ -35,19 +29,21 @@ void main() { const uint8_t m = uint8_t(1 << (4 * v_im)); - const uint l0 = 2*v_in; // 0...15 + const uint l0 = 2*v_in; // 0...15 const uint q_offset = 32*v_im + l0; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp[NUM_ROWS]; + + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[i] = FLOAT_TYPE(0); + } const uint s_shift = 4 * v_im; [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); - B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; @@ -57,44 +53,68 @@ void main() { B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; - uint16_t s0_16 = data_a_packed16[ib0 + i].scales[0]; - uint16_t s2_16 = data_a_packed16[ib0 + i].scales[1]; - uint16_t s4_16 = data_a_packed16[ib0 + i].scales[2]; - uint16_t s6_16 = data_a_packed16[ib0 + i].scales[3]; - uint16_t s8_16 = data_a_packed16[ib0 + i].scales[4]; - uint16_t s10_16 = data_a_packed16[ib0 + i].scales[5]; - u8vec2 s0 = unpack8(s0_16); - u8vec2 s2 = unpack8(s2_16); - u8vec2 s4 = unpack8(s4_16); - u8vec2 s6 = unpack8(s6_16); - u8vec2 s8 = unpack8(s8_16); - u8vec2 s10 = unpack8(s10_16); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 2; ++l) { - sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 
0 : 4)), - fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum)))))))); + uint16_t s0_16 = data_a_packed16[ib0 + i].scales[0]; + uint16_t s2_16 = data_a_packed16[ib0 + i].scales[1]; + uint16_t s4_16 = data_a_packed16[ib0 + i].scales[2]; + uint16_t s6_16 = data_a_packed16[ib0 + i].scales[3]; + uint16_t s8_16 = data_a_packed16[ib0 + i].scales[4]; + uint16_t s10_16 = data_a_packed16[ib0 + i].scales[5]; + u8vec2 s0 = unpack8(s0_16); + u8vec2 s2 = unpack8(s2_16); + u8vec2 s4 = unpack8(s4_16); + u8vec2 s6 = unpack8(s6_16); + u8vec2 s8 = unpack8(s8_16); + u8vec2 s10 = unpack8(s10_16); + + FLOAT_TYPE sum = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 
0 : 4)), sum)))))))); + } + temp[n] = fma(d, sum, temp[n]); } - temp = fma(d, sum, temp); } - tmp[gl_LocalInvocationID.x] = temp; - // sum up partial sums and write back result + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] = temp[n]; + } barrier(); - [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { if (tid < s) { - tmp[tid] += tmp[tid + s]; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] += tmpsh[n][tid + s]; + } } barrier(); } if (tid == 0) { - data_d[d_offset + row] = D_TYPE(tmp[0]); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); + } + } +} + +void main() { + const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); + + // do NUM_ROWS at a time, unless there aren't enough remaining rows + if (first_row + NUM_ROWS <= p.stride_d) { + compute_outputs(first_row, NUM_ROWS); + } else { + if (first_row >= p.stride_d) { + return; + } + compute_outputs(first_row, p.stride_d - first_row); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp index b86d28589..677c207a8 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp @@ -7,21 +7,15 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; -shared FLOAT_TYPE tmp[BLOCK_SIZE]; - -void main() { - const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; - - if (row >= p.stride_d) { - return; - } +shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; +void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; - const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; // 16 threads are used to process each block const uint it_size = gl_WorkGroupSize.x/16; @@ -31,8 +25,8 @@ void main() { const uint step = 4; - const uint il = itid/step; // 0...3 - const uint ir = itid - step*il; // 0...7 or 0...3 + const uint il = itid/step; // 0...3 + const uint ir = itid - step*il; // 0...7 or 0...3 const uint n = 4; const uint v_im = il / 2; // 0 or 1. 
0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 @@ -42,90 +36,116 @@ void main() { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp[NUM_ROWS]; + + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[i] = FLOAT_TYPE(0); + } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - f16vec2 d = data_a[ib0 + i].d; - const FLOAT_TYPE dall = FLOAT_TYPE(d.x); - const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); - - uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ]; - uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2]; - uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4]; - uvec4 scale0 = uvec4(unpack8(scale0_u32)); - uvec4 scale4 = uvec4(unpack8(scale4_u32)); - uvec4 scale8 = uvec4(unpack8(scale8_u32)); - - const uint32_t sc0 = ( scale0.x & 0x3f); - const uint32_t sc1 = ( scale0.y & 0x3f); - const uint32_t sc2 = ( scale4.x & 0x3f); - const uint32_t sc3 = ( scale4.y & 0x3f); - const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2)); - const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 2)); - const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2)); - const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2)); - - uint32_t qs0_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4]; - uint32_t qs64_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4 + 16]; - - uint32_t qs0_u32_lo4 = qs0_u32 & 0x0F0F0F0F; - uint32_t qs0_u32_hi4 = (qs0_u32 >> 4) & 0x0F0F0F0F; - uint32_t qs64_u32_lo4 = qs64_u32 & 0x0F0F0F0F; - uint32_t qs64_u32_hi4 = (qs64_u32 >> 4) & 0x0F0F0F0F; - - uvec4 qs0_lo4 = uvec4(unpack8(qs0_u32_lo4)); - uvec4 qs64_lo4 = uvec4(unpack8(qs64_u32_lo4)); - uvec4 qs0_hi4 = uvec4(unpack8(qs0_u32_hi4)); - uvec4 qs64_hi4 = uvec4(unpack8(qs64_u32_hi4)); - - const uint32_t q4_0 = qs0_lo4.x; - const uint32_t q4_1 = qs0_lo4.y; - const uint32_t q4_2 = qs0_lo4.z; - const uint32_t q4_3 = qs0_lo4.w; - const uint32_t q4_4 = qs0_hi4.x; - const uint32_t q4_5 = qs0_hi4.y; - const uint32_t q4_6 = qs0_hi4.z; - const uint32_t q4_7 = qs0_hi4.w; - const uint32_t q4_8 = qs64_lo4.x; - const uint32_t q4_9 = qs64_lo4.y; - const uint32_t q4_10 = qs64_lo4.z; - const uint32_t q4_11 = qs64_lo4.w; - const uint32_t q4_12 = qs64_hi4.x; - const uint32_t q4_13 = qs64_hi4.y; - const uint32_t q4_14 = qs64_hi4.z; - const uint32_t q4_15 = qs64_hi4.w; - B_TYPE_VEC4 by10 = data_b_v4[(b_offset + y1_idx) / 4]; B_TYPE_VEC4 by132 = data_b_v4[(b_offset + y1_idx) / 4 + 8]; B_TYPE_VEC4 by20 = data_b_v4[(b_offset + y2_idx) / 4]; B_TYPE_VEC4 by232 = data_b_v4[(b_offset + y2_idx) / 4 + 8]; - const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); - const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); - const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11))); - const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15))); - const FLOAT_TYPE smin = - fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7, - fma(FLOAT_TYPE(by10.y), sc2, 
fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7, - fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, fma(FLOAT_TYPE(by232.z), sc7, - fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7))))))))))))))); - temp = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp)); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + f16vec2 d = data_a[ib0 + i].d; + const FLOAT_TYPE dall = FLOAT_TYPE(d.x); + const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); + + uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ]; + uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2]; + uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4]; + uvec4 scale0 = uvec4(unpack8(scale0_u32)); + uvec4 scale4 = uvec4(unpack8(scale4_u32)); + uvec4 scale8 = uvec4(unpack8(scale8_u32)); + + const uint32_t sc0 = ( scale0.x & 0x3f); + const uint32_t sc1 = ( scale0.y & 0x3f); + const uint32_t sc2 = ( scale4.x & 0x3f); + const uint32_t sc3 = ( scale4.y & 0x3f); + const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2)); + const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 2)); + const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2)); + const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2)); + + uint32_t qs0_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4]; + uint32_t qs64_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4 + 16]; + + uint32_t qs0_u32_lo4 = qs0_u32 & 0x0F0F0F0F; + uint32_t qs0_u32_hi4 = (qs0_u32 >> 4) & 0x0F0F0F0F; + uint32_t qs64_u32_lo4 = qs64_u32 & 0x0F0F0F0F; + uint32_t qs64_u32_hi4 = (qs64_u32 >> 4) & 0x0F0F0F0F; + + uvec4 qs0_lo4 = uvec4(unpack8(qs0_u32_lo4)); + uvec4 qs64_lo4 = uvec4(unpack8(qs64_u32_lo4)); + uvec4 qs0_hi4 = uvec4(unpack8(qs0_u32_hi4)); + uvec4 qs64_hi4 = uvec4(unpack8(qs64_u32_hi4)); + + const uint32_t q4_0 = qs0_lo4.x; + const uint32_t q4_1 = qs0_lo4.y; + const uint32_t q4_2 = qs0_lo4.z; + const uint32_t q4_3 = qs0_lo4.w; + const uint32_t q4_4 = qs0_hi4.x; + const uint32_t q4_5 = qs0_hi4.y; + const uint32_t q4_6 = qs0_hi4.z; + const uint32_t q4_7 = qs0_hi4.w; + const uint32_t q4_8 = qs64_lo4.x; + const uint32_t q4_9 = qs64_lo4.y; + const uint32_t q4_10 = qs64_lo4.z; + const uint32_t q4_11 = qs64_lo4.w; + const uint32_t q4_12 = qs64_hi4.x; + const uint32_t q4_13 = qs64_hi4.y; + const uint32_t q4_14 = qs64_hi4.z; + const uint32_t q4_15 = qs64_hi4.w; + + const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); + const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); + const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11))); + const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15))); + const FLOAT_TYPE smin = + fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7, + fma(FLOAT_TYPE(by10.y), sc2, fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7, + fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, 
fma(FLOAT_TYPE(by232.z), sc7, + fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7))))))))))))))); + temp[n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[n])); + } } - tmp[gl_LocalInvocationID.x] = temp; - // sum up partial sums and write back result + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] = temp[n]; + } barrier(); - [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { if (tid < s) { - tmp[tid] += tmp[tid + s]; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] += tmpsh[n][tid + s]; + } } barrier(); } if (tid == 0) { - data_d[d_offset + row] = D_TYPE(tmp[0]); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); + } + } +} + +void main() { + const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); + + // do NUM_ROWS at a time, unless there aren't enough remaining rows + if (first_row + NUM_ROWS <= p.stride_d) { + compute_outputs(first_row, NUM_ROWS); + } else { + if (first_row >= p.stride_d) { + return; + } + compute_outputs(first_row, p.stride_d - first_row); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp index fd243cf91..ed3c25d89 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp @@ -7,21 +7,15 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; -shared FLOAT_TYPE tmp[BLOCK_SIZE]; - -void main() { - const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; - - if (row >= p.stride_d) { - return; - } +shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; +void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; - const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; // 16 threads are used to process each block const uint it_size = gl_WorkGroupSize.x/16; @@ -39,74 +33,16 @@ void main() { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp[NUM_ROWS]; + + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[i] = FLOAT_TYPE(0); + } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - f16vec2 d = data_a[ib0 + i].d; - const FLOAT_TYPE dall = FLOAT_TYPE(d.x); - const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); - - uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ]; - uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2]; - uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4]; - uvec4 scale0 = uvec4(unpack8(scale0_u32)); - uvec4 scale4 = uvec4(unpack8(scale4_u32)); - uvec4 scale8 = uvec4(unpack8(scale8_u32)); - - const uint32_t sc0 = ( scale0.x & 0x3f); - const uint32_t sc1 = ( scale0.y & 0x3f); - const uint32_t sc2 = ( scale4.x & 0x3f); - const uint32_t sc3 = ( scale4.y & 0x3f); - const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2)); - const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 
2)); - const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2)); - const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2)); - - uint32_t qs0_16_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]) << 16); - uint32_t qs64_80_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 32]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 40]) << 16); - - uint32_t qs0_16_u32_lo4 = qs0_16_u32 & 0x0F0F0F0F; - uint32_t qs0_16_u32_hi4 = (qs0_16_u32 >> 4) & 0x0F0F0F0F; - uint32_t qs64_80_u32_lo4 = qs64_80_u32 & 0x0F0F0F0F; - uint32_t qs64_80_u32_hi4 = (qs64_80_u32 >> 4) & 0x0F0F0F0F; - - uint32_t qh = pack32(u16vec2(data_a_packed16[ib0 + i].qh[l0 / 2], data_a_packed16[ib0 + i].qh[l0 / 2 + 8])); - - uint32_t qs0_16_lo4_offset16 = ((qh >> (2*v_im)) & 0x01010101) << 4; - uint32_t qs0_16_hi4_offset16 = ((qh >> (2*v_im)) & 0x02020202) << 3; - uint32_t qs64_80_lo4_offset16 = ((qh >> (2*v_im)) & 0x10101010) << 0; - uint32_t qs64_80_hi4_offset16 = ((qh >> (2*v_im)) & 0x20202020) >> 1; - - qs0_16_u32_lo4 += qs0_16_lo4_offset16; - qs0_16_u32_hi4 += qs0_16_hi4_offset16; - qs64_80_u32_lo4 += qs64_80_lo4_offset16; - qs64_80_u32_hi4 += qs64_80_hi4_offset16; - - uvec4 qs0_16_lo4 = uvec4(unpack8(qs0_16_u32_lo4)); - uvec4 qs64_80_lo4 = uvec4(unpack8(qs64_80_u32_lo4)); - uvec4 qs0_16_hi4 = uvec4(unpack8(qs0_16_u32_hi4)); - uvec4 qs64_80_hi4 = uvec4(unpack8(qs64_80_u32_hi4)); - - const uint32_t q4_0 = qs0_16_lo4.x; - const uint32_t q4_1 = qs0_16_lo4.y; - const uint32_t q4_2 = qs0_16_lo4.z; - const uint32_t q4_3 = qs0_16_lo4.w; - const uint32_t q4_4 = qs0_16_hi4.x; - const uint32_t q4_5 = qs0_16_hi4.y; - const uint32_t q4_6 = qs0_16_hi4.z; - const uint32_t q4_7 = qs0_16_hi4.w; - const uint32_t q4_8 = qs64_80_lo4.x; - const uint32_t q4_9 = qs64_80_lo4.y; - const uint32_t q4_10 = qs64_80_lo4.z; - const uint32_t q4_11 = qs64_80_lo4.w; - const uint32_t q4_12 = qs64_80_hi4.x; - const uint32_t q4_13 = qs64_80_hi4.y; - const uint32_t q4_14 = qs64_80_hi4.z; - const uint32_t q4_15 = qs64_80_hi4.w; - B_TYPE_VEC2 by10 = data_b_v2[(b_offset + y1_idx) / 2]; B_TYPE_VEC2 by116 = data_b_v2[(b_offset + y1_idx) / 2 + 8]; B_TYPE_VEC2 by132 = data_b_v2[(b_offset + y1_idx) / 2 + 16]; @@ -116,45 +52,129 @@ void main() { B_TYPE_VEC2 by232 = data_b_v2[(b_offset + y2_idx) / 2 + 16]; B_TYPE_VEC2 by248 = data_b_v2[(b_offset + y2_idx) / 2 + 24]; - const FLOAT_TYPE sx = - fma(FLOAT_TYPE(by10.x), q4_0, - fma(FLOAT_TYPE(by10.y), q4_1, - fma(FLOAT_TYPE(by116.x), q4_2, - FLOAT_TYPE(by116.y) * q4_3))); - const FLOAT_TYPE sy = - fma(FLOAT_TYPE(by132.x), q4_4, - fma(FLOAT_TYPE(by132.y), q4_5, - fma(FLOAT_TYPE(by148.x), q4_6, - FLOAT_TYPE(by148.y) * q4_7))); - const FLOAT_TYPE sz = - fma(FLOAT_TYPE(by20.x), q4_8, - fma(FLOAT_TYPE(by20.y), q4_9, - fma(FLOAT_TYPE(by216.x), q4_10, - FLOAT_TYPE(by216.y) * q4_11))); - const FLOAT_TYPE sw = - fma(FLOAT_TYPE(by232.x), q4_12, - fma(FLOAT_TYPE(by232.y), q4_13, - fma(FLOAT_TYPE(by248.x), q4_14, - FLOAT_TYPE(by248.y) * q4_15))); - const FLOAT_TYPE smin = - fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, - fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, - fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, - (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); - temp = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), 
fma(-dmin, smin, temp)); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + f16vec2 d = data_a[ib0 + i].d; + const FLOAT_TYPE dall = FLOAT_TYPE(d.x); + const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); + + uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ]; + uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2]; + uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4]; + uvec4 scale0 = uvec4(unpack8(scale0_u32)); + uvec4 scale4 = uvec4(unpack8(scale4_u32)); + uvec4 scale8 = uvec4(unpack8(scale8_u32)); + + const uint32_t sc0 = ( scale0.x & 0x3f); + const uint32_t sc1 = ( scale0.y & 0x3f); + const uint32_t sc2 = ( scale4.x & 0x3f); + const uint32_t sc3 = ( scale4.y & 0x3f); + const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2)); + const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 2)); + const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2)); + const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2)); + + uint32_t qs0_16_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]) << 16); + uint32_t qs64_80_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 32]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 40]) << 16); + + uint32_t qs0_16_u32_lo4 = qs0_16_u32 & 0x0F0F0F0F; + uint32_t qs0_16_u32_hi4 = (qs0_16_u32 >> 4) & 0x0F0F0F0F; + uint32_t qs64_80_u32_lo4 = qs64_80_u32 & 0x0F0F0F0F; + uint32_t qs64_80_u32_hi4 = (qs64_80_u32 >> 4) & 0x0F0F0F0F; + + uint32_t qh = pack32(u16vec2(data_a_packed16[ib0 + i].qh[l0 / 2], data_a_packed16[ib0 + i].qh[l0 / 2 + 8])); + + uint32_t qs0_16_lo4_offset16 = ((qh >> (2*v_im)) & 0x01010101) << 4; + uint32_t qs0_16_hi4_offset16 = ((qh >> (2*v_im)) & 0x02020202) << 3; + uint32_t qs64_80_lo4_offset16 = ((qh >> (2*v_im)) & 0x10101010) << 0; + uint32_t qs64_80_hi4_offset16 = ((qh >> (2*v_im)) & 0x20202020) >> 1; + + qs0_16_u32_lo4 += qs0_16_lo4_offset16; + qs0_16_u32_hi4 += qs0_16_hi4_offset16; + qs64_80_u32_lo4 += qs64_80_lo4_offset16; + qs64_80_u32_hi4 += qs64_80_hi4_offset16; + + uvec4 qs0_16_lo4 = uvec4(unpack8(qs0_16_u32_lo4)); + uvec4 qs64_80_lo4 = uvec4(unpack8(qs64_80_u32_lo4)); + uvec4 qs0_16_hi4 = uvec4(unpack8(qs0_16_u32_hi4)); + uvec4 qs64_80_hi4 = uvec4(unpack8(qs64_80_u32_hi4)); + + const uint32_t q4_0 = qs0_16_lo4.x; + const uint32_t q4_1 = qs0_16_lo4.y; + const uint32_t q4_2 = qs0_16_lo4.z; + const uint32_t q4_3 = qs0_16_lo4.w; + const uint32_t q4_4 = qs0_16_hi4.x; + const uint32_t q4_5 = qs0_16_hi4.y; + const uint32_t q4_6 = qs0_16_hi4.z; + const uint32_t q4_7 = qs0_16_hi4.w; + const uint32_t q4_8 = qs64_80_lo4.x; + const uint32_t q4_9 = qs64_80_lo4.y; + const uint32_t q4_10 = qs64_80_lo4.z; + const uint32_t q4_11 = qs64_80_lo4.w; + const uint32_t q4_12 = qs64_80_hi4.x; + const uint32_t q4_13 = qs64_80_hi4.y; + const uint32_t q4_14 = qs64_80_hi4.z; + const uint32_t q4_15 = qs64_80_hi4.w; + + const FLOAT_TYPE sx = + fma(FLOAT_TYPE(by10.x), q4_0, + fma(FLOAT_TYPE(by10.y), q4_1, + fma(FLOAT_TYPE(by116.x), q4_2, + FLOAT_TYPE(by116.y) * q4_3))); + const FLOAT_TYPE sy = + fma(FLOAT_TYPE(by132.x), q4_4, + fma(FLOAT_TYPE(by132.y), q4_5, + fma(FLOAT_TYPE(by148.x), q4_6, + FLOAT_TYPE(by148.y) * q4_7))); + const FLOAT_TYPE sz = + fma(FLOAT_TYPE(by20.x), q4_8, + fma(FLOAT_TYPE(by20.y), q4_9, + fma(FLOAT_TYPE(by216.x), q4_10, + FLOAT_TYPE(by216.y) * q4_11))); + const FLOAT_TYPE sw = + fma(FLOAT_TYPE(by232.x), q4_12, + 
fma(FLOAT_TYPE(by232.y), q4_13, + fma(FLOAT_TYPE(by248.x), q4_14, + FLOAT_TYPE(by248.y) * q4_15))); + const FLOAT_TYPE smin = + fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, + fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, + fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, + (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); + temp[n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[n])); + } } - tmp[gl_LocalInvocationID.x] = temp; - // sum up partial sums and write back result + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] = temp[n]; + } barrier(); - [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { if (tid < s) { - tmp[tid] += tmp[tid + s]; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] += tmpsh[n][tid + s]; + } } barrier(); } if (tid == 0) { - data_d[d_offset + row] = D_TYPE(tmp[0]); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); + } + } +} + +void main() { + const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); + + // do NUM_ROWS at a time, unless there aren't enough remaining rows + if (first_row + NUM_ROWS <= p.stride_d) { + compute_outputs(first_row, NUM_ROWS); + } else { + if (first_row >= p.stride_d) { + return; + } + compute_outputs(first_row, p.stride_d - first_row); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp index 760aff854..fab4ff5ff 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp @@ -7,21 +7,15 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; -shared FLOAT_TYPE tmp[BLOCK_SIZE]; - -void main() { - const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; - - if (row >= p.stride_d) { - return; - } +shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; +void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; - const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; // 16 threads are used to process each block const uint it_size = gl_WorkGroupSize.x/16; @@ -42,69 +36,95 @@ void main() { const uint s_offset = 8*v_im + is; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp[NUM_ROWS]; + + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[i] = FLOAT_TYPE(0); + } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { - const uint y_idx = i * QUANT_K + y_offset; - - const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); - - FLOAT_TYPE scales[4]; - scales[0] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]); - scales[1] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]); - scales[2] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]); - scales[3] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]); - - uint32_t ql0_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2]) | (uint32_t(data_a_packed16[ib0 
+ i].ql[ql_offset / 2 + 1]) << 16); - uint32_t ql32_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 16]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 17]) << 16); - - uint32_t ql0_u32_lo4 = ql0_u32 & 0x0F0F0F0F; - uint32_t ql0_u32_hi4 = (ql0_u32 >> 4) & 0x0F0F0F0F; - uint32_t ql32_u32_lo4 = ql32_u32 & 0x0F0F0F0F; - uint32_t ql32_u32_hi4 = (ql32_u32 >> 4) & 0x0F0F0F0F; - - uint32_t qh_u32 = uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2 + 1]) << 16); - uint32_t qh0_u32 = (qh_u32 & 0x03030303) << 4; - uint32_t qh2_u32 = (qh_u32 & 0x0C0C0C0C) << 2; - uint32_t qh4_u32 = (qh_u32 & 0x30303030) << 0; - uint32_t qh6_u32 = (qh_u32 & 0xC0C0C0C0) >> 2; - - uint32_t q0_u32 = ql0_u32_lo4 | qh0_u32; - uint32_t q1_u32 = ql32_u32_lo4 | qh2_u32; - uint32_t q2_u32 = ql0_u32_hi4 | qh4_u32; - uint32_t q3_u32 = ql32_u32_hi4 | qh6_u32; - - uvec4 q0 = uvec4(unpack8(q0_u32)); - uvec4 q1 = uvec4(unpack8(q1_u32)); - uvec4 q2 = uvec4(unpack8(q2_u32)); - uvec4 q3 = uvec4(unpack8(q3_u32)); + const uint y_idx = i * QUANT_K + y_offset; B_TYPE_VEC4 by0 = data_b_v4[(b_offset + y_idx) / 4]; B_TYPE_VEC4 by32 = data_b_v4[(b_offset + y_idx) / 4 + 8]; B_TYPE_VEC4 by64 = data_b_v4[(b_offset + y_idx) / 4 + 16]; B_TYPE_VEC4 by96 = data_b_v4[(b_offset + y_idx) / 4 + 24]; - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 4; ++l) { - sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32), - fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32), - fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32), - fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum)))); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; + const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); + + FLOAT_TYPE scales[4]; + scales[0] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]); + scales[1] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]); + scales[2] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]); + scales[3] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]); + + uint32_t ql0_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 1]) << 16); + uint32_t ql32_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 16]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 17]) << 16); + + uint32_t ql0_u32_lo4 = ql0_u32 & 0x0F0F0F0F; + uint32_t ql0_u32_hi4 = (ql0_u32 >> 4) & 0x0F0F0F0F; + uint32_t ql32_u32_lo4 = ql32_u32 & 0x0F0F0F0F; + uint32_t ql32_u32_hi4 = (ql32_u32 >> 4) & 0x0F0F0F0F; + + uint32_t qh_u32 = uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2 + 1]) << 16); + uint32_t qh0_u32 = (qh_u32 & 0x03030303) << 4; + uint32_t qh2_u32 = (qh_u32 & 0x0C0C0C0C) << 2; + uint32_t qh4_u32 = (qh_u32 & 0x30303030) << 0; + uint32_t qh6_u32 = (qh_u32 & 0xC0C0C0C0) >> 2; + + uint32_t q0_u32 = ql0_u32_lo4 | qh0_u32; + uint32_t q1_u32 = ql32_u32_lo4 | qh2_u32; + uint32_t q2_u32 = ql0_u32_hi4 | qh4_u32; + uint32_t q3_u32 = ql32_u32_hi4 | qh6_u32; + + uvec4 q0 = uvec4(unpack8(q0_u32)); + uvec4 q1 = uvec4(unpack8(q1_u32)); + uvec4 q2 = uvec4(unpack8(q2_u32)); + uvec4 q3 = uvec4(unpack8(q3_u32)); + + FLOAT_TYPE sum = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 4; ++l) { + sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32), + fma(FLOAT_TYPE(by32[l]) * scales[1], 
FLOAT_TYPE(int8_t(q1[l]) - 32), + fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32), + fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum)))); + } + temp[n] += sum * d; } - temp += sum * d; } - tmp[gl_LocalInvocationID.x] = temp; // sum up partial sums and write back result - + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] = temp[n]; + } barrier(); - [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { if (tid < s) { - tmp[tid] += tmp[tid + s]; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[n][tid] += tmpsh[n][tid + s]; + } } barrier(); } if (tid == 0) { - data_d[d_offset + row] = D_TYPE(tmp[0]); + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); + } + } +} + +void main() { + const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); + + // do NUM_ROWS at a time, unless there aren't enough remaining rows + if (first_row + NUM_ROWS <= p.stride_d) { + compute_outputs(first_row, NUM_ROWS); + } else { + if (first_row >= p.stride_d) { + return; + } + compute_outputs(first_row, p.stride_d - first_row); } } From 16cdce7b68218959e0658e2f95b4572573d5008e Mon Sep 17 00:00:00 2001 From: Alexey Parfenov Date: Sat, 28 Dec 2024 15:08:54 +0000 Subject: [PATCH 20/81] server : fix token duplication when streaming with stop strings (#10997) --- examples/server/server.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 30ff3b149..3558ddb7c 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1856,6 +1856,8 @@ struct server_context { result.text_to_send = slot.generated_text.substr(pos, std::string::npos); slot.n_sent_text += result.text_to_send.size(); // add the token to slot queue and cache + } else { + result.text_to_send = ""; } slot.add_token(result); From f865ea149d71ef883e3780fced8a20a1464eccf4 Mon Sep 17 00:00:00 2001 From: Isaac McFadyen Date: Sat, 28 Dec 2024 10:09:19 -0500 Subject: [PATCH 21/81] server: added more docs for response_fields field (#10995) --- examples/server/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/server/README.md b/examples/server/README.md index c7d91be99..07436057a 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -450,7 +450,7 @@ These words will not be included in the completion, so make sure to add them to `post_sampling_probs`: Returns the probabilities of top `n_probs` tokens after applying sampling chain. -`response_fields`: A list of response fields, for example: `"response_fields": ["content", "generation_settings/n_predict"]`. If the specified field is missing, it will simply be omitted from the response without triggering an error. +`response_fields`: A list of response fields, for example: `"response_fields": ["content", "generation_settings/n_predict"]`. If the specified field is missing, it will simply be omitted from the response without triggering an error. Note that fields with a slash will be unnested; for example, `generation_settings/n_predict` will move the field `n_predict` from the `generation_settings` object to the root of the response and give it a new name. 
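(Editorial illustration, not part of the patch: the sketch below shows how a client-side test might resolve the slash paths accepted by `response_fields`. The `json` type is nlohmann::json, the helper name `get_by_path`, and the sample document are assumptions made only for this example; the exact key under which the server reports an unnested field is not spelled out in this excerpt.)

    // Sketch only: walk an "a/b/c" style path; a missing component yields null,
    // mirroring "omitted from the response without triggering an error".
    #include <nlohmann/json.hpp>   // assumed available; not introduced by this patch
    #include <iostream>
    #include <sstream>
    #include <string>

    using json = nlohmann::json;

    static json get_by_path(const json & doc, const std::string & path) {
        const json * node = &doc;
        std::stringstream ss(path);
        std::string key;
        while (std::getline(ss, key, '/')) {
            if (!node->is_object() || !node->contains(key)) {
                return json();  // null
            }
            node = &node->at(key);
        }
        return *node;
    }

    int main() {
        const json sample = {
            {"content", "hello"},
            {"generation_settings", {{"n_predict", 8}}},
        };
        std::cout << get_by_path(sample, "generation_settings/n_predict") << "\n";  // prints 8
        std::cout << get_by_path(sample, "missing/field") << "\n";                  // prints null
    }
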
**Response format** From fdd21889123bec62b1db3b2fc22b5a4abab32174 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sun, 29 Dec 2024 02:35:11 -0600 Subject: [PATCH 22/81] vulkan: Use push constant offset to handle misaligned descriptors (#10987) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 74 ++++++++++++++++--- ggml/src/ggml-vulkan/vulkan-shaders/acc.comp | 4 +- ggml/src/ggml-vulkan/vulkan-shaders/add.comp | 2 +- .../src/ggml-vulkan/vulkan-shaders/clamp.comp | 4 +- .../ggml-vulkan/vulkan-shaders/concat.comp | 6 +- .../vulkan-shaders/contig_copy.comp | 8 +- ggml/src/ggml-vulkan/vulkan-shaders/copy.comp | 4 +- ggml/src/ggml-vulkan/vulkan-shaders/cos.comp | 4 +- ggml/src/ggml-vulkan/vulkan-shaders/div.comp | 2 +- .../vulkan-shaders/generic_binary_head.comp | 6 +- .../vulkan-shaders/generic_unary_head.comp | 5 +- .../ggml-vulkan/vulkan-shaders/get_rows.comp | 6 +- ggml/src/ggml-vulkan/vulkan-shaders/mul.comp | 2 +- ggml/src/ggml-vulkan/vulkan-shaders/pad.comp | 2 +- .../ggml-vulkan/vulkan-shaders/repeat.comp | 2 +- .../src/ggml-vulkan/vulkan-shaders/scale.comp | 2 +- ggml/src/ggml-vulkan/vulkan-shaders/sin.comp | 4 +- .../ggml-vulkan/vulkan-shaders/square.comp | 4 +- .../ggml-vulkan/vulkan-shaders/upscale.comp | 4 +- 19 files changed, 103 insertions(+), 42 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index c0a43631c..6dfc60c9b 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -411,7 +411,7 @@ struct vk_op_unary_push_constants { uint32_t ne; uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03; uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13; - uint32_t d_offset; + uint32_t misalign_offsets; float param1; float param2; uint32_t ne0_012mp; uint32_t ne0_012L; uint32_t ne0_01mp; uint32_t ne0_01L; @@ -459,7 +459,7 @@ struct vk_op_binary_push_constants { uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03; uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13; uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23; - uint32_t d_offset; + uint32_t misalign_offsets; float param1; float param2; int32_t param3; }; @@ -546,7 +546,7 @@ struct vk_staging_memcpy { }; struct vk_op_upscale_push_constants { - uint32_t ne; uint32_t d_offset; + uint32_t ne; uint32_t a_offset; uint32_t d_offset; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03; uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; float sf0; float sf1; float sf2; float sf3; @@ -5076,6 +5076,57 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) { } } +static uint32_t get_misalign_bytes(ggml_backend_vk_context * ctx, const ggml_tensor * t) +{ + return ((vk_tensor_offset(t) + t->view_offs) & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1));; +} + +template void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, T &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) { + GGML_UNUSED(p); + GGML_UNUSED(src0); + GGML_UNUSED(src1); + GGML_UNUSED(src2); + GGML_UNUSED(dst); + static_assert(!std::is_const::value, "unexpected type"); + GGML_ASSERT(!src0 || get_misalign_bytes(ctx, src0) == 0); + GGML_ASSERT(!src1 || 
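(Editorial aside, not part of the patch: the hunk above replaces the single `d_offset` push constant with a packed `misalign_offsets` word, and the hunks that follow align each descriptor's buffer offset down to `minStorageBufferOffsetAlignment` while carrying the remainder through push constants. The standalone sketch below, using made-up numbers, shows the power-of-two split being relied on.)

    // Sketch with assumed values: split a byte offset into an aligned part for
    // the descriptor binding and a small remainder for the push constants.
    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t align      = 0x40;    // minStorageBufferOffsetAlignment (power of two; value assumed)
        const uint64_t tensor_off = 0x1234;  // hypothetical tensor byte offset
        const uint64_t desc_off   = tensor_off & ~(align - 1);  // where the descriptor is bound
        const uint64_t misalign   = tensor_off &  (align - 1);  // what the shader adds back
        assert(desc_off % align == 0 && desc_off + misalign == tensor_off);
        return 0;
    }

In the patch itself, the per-tensor remainders are further divided by the element size and packed into bit fields of `misalign_offsets`, as the `init_pushconst_tensor_offsets` specializations below show.
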
get_misalign_bytes(ctx, src1) == 0); + GGML_ASSERT(!src2 || get_misalign_bytes(ctx, src2) == 0); + GGML_ASSERT(!dst || get_misalign_bytes(ctx, dst) == 0); +} + +template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_unary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) { + const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type); + const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type); + + p.misalign_offsets = (a_offset << 16) | d_offset; + + GGML_UNUSED(src1); + GGML_UNUSED(src2); +} + +template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_binary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) { + const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type); + const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type); + const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type); + + GGML_ASSERT(dst->op != GGML_OP_GET_ROWS || (a_offset == 0 && b_offset == 0 && d_offset == 0)); + + p.misalign_offsets = (a_offset << 16) | (b_offset << 8) | d_offset; + + GGML_UNUSED(src2); +} + +template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_upscale_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) { + const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type); + const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type); + + p.a_offset = a_offset; + p.d_offset = d_offset; + + GGML_UNUSED(src1); + GGML_UNUSED(src2); +} + template static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, PC&& pc, bool dryrun = false) { VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3]; @@ -5179,8 +5230,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co } GGML_ASSERT(d_D != nullptr); - uint64_t d_buf_offset = ((vk_tensor_offset(dst) + dst->view_offs) / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment; - GGML_ASSERT(d_buf_offset == vk_tensor_offset(dst) || op == GGML_OP_CPY); // NOLINT + uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs; if(!src0_uma) { d_X = src0_buf_ctx->dev_buffer; x_buf_offset = vk_tensor_offset(src0) + src0->view_offs; @@ -5196,6 +5246,12 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co z_buf_offset = vk_tensor_offset(src2) + src2->view_offs; GGML_ASSERT(d_Z != nullptr); } + // Compute misalignment offset for descriptors and store it in in push constants, then align the descriptor offsets. 
+ init_pushconst_tensor_offsets(ctx, pc, src0, src1, src2, dst); + x_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1); + y_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1); + z_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1); + d_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1); if (op_supports_incontiguous) { x_sz = ggml_nbytes(src0); @@ -5383,7 +5439,6 @@ static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const const uint32_t src0_type_size = ggml_type_size(src0->type); const uint32_t src1_type_size = ggml_type_size(src1->type); const uint32_t dst_type_size = ggml_type_size(dst->type); - const uint32_t d_offset = ((vk_tensor_offset(dst) + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size; int nb1 = dst->op_params[0] / 4; // 4 bytes of float32 int nb2 = dst->op_params[1] / 4; // 4 bytes of float32 @@ -5395,7 +5450,7 @@ static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t)src0->nb[3] / src0_type_size, (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t) dst->nb[3] / dst_type_size, - d_offset, + 0, 0.0f, 0.0f, offset, }, dryrun); } @@ -5599,7 +5654,7 @@ static void ggml_vk_upscale(ggml_backend_vk_context * ctx, vk_context& subctx, c const float sf3 = (float)dst->ne[3] / src0->ne[3]; ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UPSCALE, { - (uint32_t)ggml_nelements(dst), 0, + (uint32_t)ggml_nelements(dst), 0, 0, (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size, (uint32_t)dst->ne[0], (uint32_t)dst->ne[1], (uint32_t)dst->ne[2],(uint32_t)dst->ne[3], sf0, sf1, sf2, sf3, @@ -5709,13 +5764,12 @@ static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, co static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { const uint32_t src0_type_size = ggml_type_size(src0->type); const uint32_t dst_type_size = ggml_type_size(dst->type); - const uint32_t d_offset = ((vk_tensor_offset(dst) + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size; ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CPY, { (uint32_t)ggml_nelements(src0), (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size, - d_offset, + 0, 0.0f, 0.0f, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, dryrun); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp b/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp index 4f5a04e71..d896f1ef0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp @@ -21,9 +21,9 @@ void main() { get_indices(idx, i00, i01, i02, i03); if (ox < p.ne10 && oy < p.ne11 && oz < p.ne12) { - data_d[p.d_offset + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(i00, i01, i02, i03)]) + FLOAT_TYPE(data_b[ox + oy * p.ne10 + oz * p.ne10 * p.ne11])); + data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) + FLOAT_TYPE(data_b[get_boffset() + ox + oy * p.ne10 + oz * p.ne10 * p.ne11])); } else { - data_d[p.d_offset + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(i00, i01, i02, i03)])); + data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)])); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/add.comp b/ggml/src/ggml-vulkan/vulkan-shaders/add.comp index da61b76df..2b4085c4f 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/add.comp @@ -22,7 +22,7 @@ void main() { uint i00, i01, i02, i03; get_indices(idx, i00, i01, i02, i03); - data_d[p.d_offset + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(i00, i01, i02, i03)]) + FLOAT_TYPE(data_b[src1_idx(i00, i01, i02, i03)])); + data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) + FLOAT_TYPE(data_b[get_boffset() + src1_idx(i00, i01, i02, i03)])); idx += num_threads; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp b/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp index ae8fa8753..1e5cb8dae 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp @@ -12,6 +12,6 @@ void main() { return; } - const FLOAT_TYPE val = FLOAT_TYPE(data_a[src0_idx(idx)]); - data_d[p.d_offset + dst_idx(idx)] = D_TYPE(val < p.param1 ? p.param1 : (val > p.param2 ? p.param2 : val)); + const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(val < p.param1 ? p.param1 : (val > p.param2 ? p.param2 : val)); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp b/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp index 683f9ac3c..9ee2f1fae 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp @@ -30,12 +30,12 @@ void main() { const bool is_src0 = i0 < p.ne00 && i1 < p.ne01 && i2 < p.ne02 && i3 < p.ne03; #ifndef OPTIMIZATION_ERROR_WORKAROUND - data_d[p.d_offset + dst_idx] = D_TYPE(is_src0 ? data_a[src0_idx] : data_b[src1_idx]); + data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? 
data_a[get_aoffset() + src0_idx] : data_b[get_boffset() + src1_idx]); #else if (is_src0) { - data_d[p.d_offset + dst_idx] = data_a[src0_idx]; + data_d[get_doffset() + dst_idx] = data_a[get_aoffset() + src0_idx]; } else { - data_d[p.d_offset + dst_idx] = data_b[src1_idx]; + data_d[get_doffset() + dst_idx] = data_b[get_boffset() + src1_idx]; } #endif } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp b/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp index 9acbdd3d2..dd828c232 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp @@ -19,9 +19,9 @@ void main() { if (idx + (num_iter-1)*num_threads < p.ne) { [[unroll]] for (uint i = 0; i < num_iter; ++i) { #ifndef OPTIMIZATION_ERROR_WORKAROUND - data_d[p.d_offset + idx] = D_TYPE(data_a[idx]); + data_d[get_doffset() + idx] = D_TYPE(data_a[get_aoffset() + idx]); #else - data_d[p.d_offset + idx] = data_a[idx]; + data_d[get_doffset() + idx] = data_a[get_aoffset() + idx]; #endif idx += num_threads; } @@ -32,9 +32,9 @@ void main() { } #ifndef OPTIMIZATION_ERROR_WORKAROUND - data_d[p.d_offset + idx] = D_TYPE(data_a[idx]); + data_d[get_doffset() + idx] = D_TYPE(data_a[get_aoffset() + idx]); #else - data_d[p.d_offset + idx] = data_a[idx]; + data_d[get_doffset() + idx] = data_a[get_aoffset() + idx]; #endif idx += num_threads; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp b/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp index 2775068f9..29c906494 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp @@ -13,8 +13,8 @@ void main() { } #ifndef OPTIMIZATION_ERROR_WORKAROUND - data_d[p.d_offset + dst_idx(idx)] = D_TYPE(data_a[src0_idx(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(data_a[get_aoffset() + src0_idx(idx)]); #else - data_d[p.d_offset + dst_idx(idx)] = data_a[src0_idx(idx)]; + data_d[get_doffset() + dst_idx(idx)] = data_a[get_aoffset() + src0_idx(idx)]; #endif } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp b/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp index fbd9d272c..0b8d02f58 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp @@ -12,6 +12,6 @@ void main() { return; } - const FLOAT_TYPE val = FLOAT_TYPE(data_a[src0_idx(idx)]); - data_d[p.d_offset + dst_idx(idx)] = D_TYPE(cos(val)); + const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(cos(val)); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/div.comp b/ggml/src/ggml-vulkan/vulkan-shaders/div.comp index e581905b3..9fb69c6c1 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/div.comp @@ -20,7 +20,7 @@ void main() { uint i00, i01, i02, i03; get_indices(idx, i00, i01, i02, i03); - data_d[p.d_offset + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(i00, i01, i02, i03)]) / FLOAT_TYPE(data_b[src1_idx(i00, i01, i02, i03)])); + data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) / FLOAT_TYPE(data_b[get_boffset() + src1_idx(i00, i01, i02, i03)])); idx += num_threads; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp b/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp index a6555fa27..062e2a4cd 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +++ 
b/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp @@ -7,7 +7,7 @@ layout (push_constant) uniform parameter uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03; uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13; uint ne20; uint ne21; uint ne22; uint ne23; uint nb20; uint nb21; uint nb22; uint nb23; - uint d_offset; + uint misalign_offsets; float param1; float param2; int param3; } p; @@ -22,6 +22,10 @@ uint get_idx() { return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; } +uint get_aoffset() { return p.misalign_offsets >> 16; } +uint get_boffset() { return (p.misalign_offsets >> 8) & 0xFF; } +uint get_doffset() { return p.misalign_offsets & 0xFF; } + // mod and div are expensive and coordinates/dimensions are often power of 2 or equal to 1 uint fastmod(uint a, uint b) { if ((b & (b-1)) == 0) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp b/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp index ab7c9d7eb..68d1bc9f1 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp @@ -6,7 +6,7 @@ layout (push_constant) uniform parameter uint ne; uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03; uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13; - uint d_offset; + uint misalign_offsets; float param1; float param2; uint ne0_012mp; uint ne0_012L; @@ -24,6 +24,9 @@ uint get_idx() { return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; } +uint get_aoffset() { return p.misalign_offsets >> 16; } +uint get_doffset() { return p.misalign_offsets & 0xFFFF; } + // see init_fastdiv_values in ggml-vulkan.cpp uint fastdiv(uint n, uint mp, uint L) { uint msbs, lsbs; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp index a7b81e52c..e877ed779 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp @@ -15,10 +15,10 @@ void main() { return; } - const uint i01 = data_b[i10*p.nb10 + i11*p.nb11 + i12*p.nb12]; + const uint i01 = data_b[get_boffset() + i10*p.nb10 + i11*p.nb11 + i12*p.nb12]; - const uint a_offset = i01*p.nb01 + i11*p.nb02 + i12*p.nb03; - const uint d_offset = i10*p.nb21 + i11*p.nb22 + i12*p.nb23; + const uint a_offset = get_aoffset() + i01*p.nb01 + i11*p.nb02 + i12*p.nb03; + const uint d_offset = get_doffset() + i10*p.nb21 + i11*p.nb22 + i12*p.nb23; #ifndef OPTIMIZATION_ERROR_WORKAROUND data_d[d_offset + i00] = D_TYPE(data_a[a_offset + i00]); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp index 5ce57cbcf..43de19df8 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp @@ -20,7 +20,7 @@ void main() { uint i00, i01, i02, i03; get_indices(idx, i00, i01, i02, i03); - data_d[p.d_offset + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[src0_idx(i00, i01, i02, i03)]) * FLOAT_TYPE(data_b[src1_idx(i00, i01, i02, i03)])); + data_d[get_doffset() + dst_idx(i00, i01, i02, i03)] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(i00, i01, i02, i03)]) * FLOAT_TYPE(data_b[get_boffset() + src1_idx(i00, i01, i02, i03)])); idx += num_threads; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp 
b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp index e87d8b18b..450b67fc5 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp @@ -24,5 +24,5 @@ void main() { const bool is_src0 = i0 < p.ne00 && i1 < p.ne01 && i2 < p.ne02 && i3 < p.ne03; - data_d[p.d_offset + dst_idx] = D_TYPE(is_src0 ? data_a[src0_idx] : 0.0f); + data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? data_a[get_aoffset() + src0_idx] : 0.0f); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp b/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp index c03f737cc..1568b141d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp @@ -22,5 +22,5 @@ void main() { return; } - data_d[p.d_offset + dst_idx(idx)] = D_TYPE(data_a[src0_idx_mod(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(data_a[get_aoffset() + src0_idx_mod(idx)]); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp b/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp index 5cfee8c3b..4663428de 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp @@ -18,7 +18,7 @@ void main() { continue; } - data_d[p.d_offset + idx] = D_TYPE(FLOAT_TYPE(data_a[idx]) * FLOAT_TYPE(p.param1)); + data_d[get_doffset() + idx] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + idx]) * FLOAT_TYPE(p.param1)); idx += num_threads; } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp b/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp index 67c48fb9a..d7c15a169 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp @@ -12,6 +12,6 @@ void main() { return; } - const FLOAT_TYPE val = FLOAT_TYPE(data_a[src0_idx(idx)]); - data_d[p.d_offset + dst_idx(idx)] = D_TYPE(sin(val)); + const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(sin(val)); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/square.comp b/ggml/src/ggml-vulkan/vulkan-shaders/square.comp index 2ff48ddc5..ef43598ba 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/square.comp @@ -12,6 +12,6 @@ void main() { return; } - const FLOAT_TYPE val = FLOAT_TYPE(data_a[src0_idx(idx)]); - data_d[p.d_offset + dst_idx(idx)] = D_TYPE(val * val); + const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(val * val); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp b/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp index 511a086ea..6f607380d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp @@ -2,7 +2,7 @@ layout (push_constant) uniform parameter { - uint ne; uint d_offset; + uint ne; uint a_offset; uint d_offset; uint nb00; uint nb01; uint nb02; uint nb03; uint ne10; uint ne11; uint ne12; uint ne13; float sf0; float sf1; float sf2; float sf3; @@ -32,5 +32,5 @@ void main() { const uint i02 = uint(i12 / p.sf2); const uint i03 = uint(i13 / p.sf3); - data_d[p.d_offset + idx] = D_TYPE(data_a[i03 * p.nb03 + i02 * p.nb02 + i01 * p.nb01 + i00 * p.nb00]); + data_d[p.d_offset + idx] = D_TYPE(data_a[p.a_offset + i03 * p.nb03 + i02 * p.nb02 + i01 * p.nb01 + i00 * p.nb00]); } From a813badbbdf0d38705f249df7a0c99af5cdee678 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sun, 29 Dec 2024 03:16:34 -0600 Subject: [PATCH 23/81] vulkan: im2col and matmul optimizations for 
stable diffusion (#10942) * tests: Add im2col perf tests * vulkan: optimize im2col, more elements per thread * vulkan: increase small tile size for NV_coopmat2 * vulkan: change im2col to 512 elements per workgroup --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 10 +-- .../ggml-vulkan/vulkan-shaders/im2col.comp | 73 +++++++++++++------ tests/test-backend-ops.cpp | 12 +++ 3 files changed, 66 insertions(+), 29 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 6dfc60c9b..8e47e79ae 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1404,10 +1404,10 @@ static void ggml_vk_load_shaders(vk_device& device) { // spec constants and tile sizes for non-quant matmul/matmul_id l_warptile = { 256, 128, 256, 64 }; m_warptile = { 256, 128, 128, 64 }; - s_warptile = { 128, 32, 16, 64 }; + s_warptile = { 128, 64, 64, 64 }; l_wg_denoms = {128, 256, 1 }; m_wg_denoms = {128, 128, 1 }; - s_wg_denoms = { 32, 16, 1 }; + s_wg_denoms = { 64, 64, 1 }; // spec constants and tile sizes for quant matmul (non-Qi_K) l_warptile_mmq = { 256, 128, 256, 64 }; @@ -2017,11 +2017,11 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1); - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); if (device->float_controls_rte_fp16) { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); } else { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); } ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp index 966fedf8f..122b1e93f 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp @@ -2,6 +2,7 @@ #extension GL_EXT_shader_16bit_storage : require #extension GL_EXT_spirv_intrinsics: enable +#extension GL_EXT_control_flow_attributes : require #if RTE16 spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits @@ -23,40 +24,64 @@ layout (push_constant) uniform parameter #include "types.comp" -#define BLOCK_SIZE 
256 +layout(constant_id = 0) const uint BLOCK_SIZE = 32; -layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; +const uint NUM_ITER = 512 / BLOCK_SIZE; + +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (binding = 0) readonly buffer X {A_TYPE data_a[];}; layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; void main() { - const uint i = gl_GlobalInvocationID.x; - if (i >= p.pelements) { - return; - } - - const uint ksize = p.OW * (p.KH > 1 ? p.KW : 1); - const uint kx = i / ksize; - const uint kd = kx * ksize; - const uint ky = (i - kd) / p.OW; - const uint ix = i % p.OW; + const uint gidx = gl_GlobalInvocationID.x; const uint oh = gl_GlobalInvocationID.y; const uint batch = gl_GlobalInvocationID.z / p.IC; const uint ic = gl_GlobalInvocationID.z % p.IC; - const uint iiw = ix * p.s0 + kx * p.d0 - p.p0; - const uint iih = oh * p.s1 + ky * p.d1 - p.p1; - - const uint offset_dst = - ((batch * p.OH + oh) * p.OW + ix) * p.CHW + - (ic * (p.KW * p.KH) + ky * p.KW + kx); - - if (iih < 0 || iih >= p.IH || iiw < 0 || iiw >= p.IW) { - data_d[offset_dst] = D_TYPE(0.0f); - } else { - const uint offset_src = ic * p.offset_delta + batch * p.batch_offset; - data_d[offset_dst] = D_TYPE(data_a[offset_src + iih * p.IW + iiw]); + A_TYPE values[NUM_ITER]; + uint offset_dst[NUM_ITER]; + [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) { + values[idx] = A_TYPE(0); } + + [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) { + + const uint i = gidx * NUM_ITER + idx; + + const uint ksize = p.OW * (p.KH > 1 ? p.KW : 1); + const uint kx = i / ksize; + const uint kd = kx * ksize; + const uint ky = (i - kd) / p.OW; + const uint ix = i % p.OW; + + const uint iiw = ix * p.s0 + kx * p.d0 - p.p0; + const uint iih = oh * p.s1 + ky * p.d1 - p.p1; + + offset_dst[idx] = + ((batch * p.OH + oh) * p.OW + ix) * p.CHW + + (ic * (p.KW * p.KH) + ky * p.KW + kx); + + if (i >= p.pelements) { + continue; + } + + if (iih < p.IH && iiw < p.IW) { + const uint offset_src = ic * p.offset_delta + batch * p.batch_offset; + values[idx] = data_a[offset_src + iih * p.IW + iiw]; + } + } + + [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) { + + const uint i = gidx * NUM_ITER + idx; + + if (i >= p.pelements) { + continue; + } + + data_d[offset_dst[idx]] = D_TYPE(values[idx]); + } + } diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index ccdd3fb57..c79acffd2 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -3945,6 +3945,18 @@ static std::vector> make_test_cases_perf() { } } + for (int K : {3, 5}) { + for (int IC : {256, 2560}) { + for (int IW_IH : {32, 64, 256}) { + if (IC == 2560 && IW_IH == 256) { + // too big + continue; + } + test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {IW_IH, IW_IH, IC, 1}, {K, K, IC, 1}, 1, 1, 1, 1, 1, 1, true)); + } + } + } + return test_cases; } From c250ecb3157f3bae0a45f44c3c953b5414d4c2f7 Mon Sep 17 00:00:00 2001 From: ag2s20150909 <19373730+ag2s20150909@users.noreply.github.com> Date: Mon, 30 Dec 2024 20:35:13 +0800 Subject: [PATCH 24/81] android : fix llama_batch free (#11014) --- examples/llama.android/llama/src/main/cpp/llama-android.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/llama.android/llama/src/main/cpp/llama-android.cpp b/examples/llama.android/llama/src/main/cpp/llama-android.cpp index b3858ddfb..66ec2aeeb 100644 --- a/examples/llama.android/llama/src/main/cpp/llama-android.cpp +++ 
b/examples/llama.android/llama/src/main/cpp/llama-android.cpp @@ -305,7 +305,9 @@ Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens, extern "C" JNIEXPORT void JNICALL Java_android_llama_cpp_LLamaAndroid_free_1batch(JNIEnv *, jobject, jlong batch_pointer) { - llama_batch_free(*reinterpret_cast<llama_batch *>(batch_pointer)); + //llama_batch_free(*reinterpret_cast<llama_batch *>(batch_pointer)); + const auto batch = reinterpret_cast<llama_batch *>(batch_pointer); + delete batch; } extern "C" From 716bd6dec3e044e5c325386b5b0483392b24cefe Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 30 Dec 2024 11:27:11 -0600 Subject: [PATCH 25/81] vulkan: optimize mul_mat for small values of N (#10991) Make the mul_mat_vec shaders support N>1 (as a spec constant, NUM_COLS) where the batch_strides are overloaded to hold the row strides. Put the loads from the B matrix in the innermost loop because it should cache better. Share some code for reducing the result values to memory in mul_mat_vec_base. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 87 +++++++------ .../vulkan-shaders/mul_mat_vec.comp | 122 ++++++++---------- .../vulkan-shaders/mul_mat_vec_base.comp | 33 +++++ .../vulkan-shaders/mul_mat_vec_q2_k.comp | 92 ++++++------- .../vulkan-shaders/mul_mat_vec_q3_k.comp | 75 +++++------ .../vulkan-shaders/mul_mat_vec_q4_k.comp | 64 ++++----- .../vulkan-shaders/mul_mat_vec_q5_k.comp | 104 ++++++--------- .../vulkan-shaders/mul_mat_vec_q6_k.comp | 58 +++------ tests/test-backend-ops.cpp | 2 +- 9 files changed, 288 insertions(+), 349 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 8e47e79ae..020e61280 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -145,6 +145,8 @@ class vk_perf_logger; #endif static void ggml_vk_destroy_buffer(vk_buffer& buf); +static constexpr uint32_t mul_mat_vec_max_cols = 8; + struct vk_device_struct { std::mutex mutex; @@ -202,8 +204,8 @@ struct vk_device_struct { vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT]; vk_pipeline pipeline_dequant[GGML_TYPE_COUNT]; - vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT]; - vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT]; + vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols]; + vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols]; vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT]; vk_pipeline pipeline_mul_mat_vec_p021_f16_f32; @@ -1866,33 +1868,35 @@ static void ggml_vk_load_shaders(vk_device& device) { } else if (device->vendor_id == VK_VENDOR_ID_INTEL) rm_stdq = 2; - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); -
ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); + for (uint32_t i = 0; i < mul_mat_vec_max_cols; ++i) { + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32_"+std::to_string(i+1), mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32_"+std::to_string(i+1), mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + 
ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); - ggml_vk_create_pipeline(device, 
device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", 
mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32_"+std::to_string(i+1), mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32_"+std::to_string(i+1), mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, 
sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); + } ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); @@ -2892,9 +2896,10 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_conte return ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc; } -static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) { +static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type, uint32_t num_cols) { VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()"); GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16); + GGML_ASSERT(num_cols >= 1 && num_cols <= mul_mat_vec_max_cols); switch (a_type) { case GGML_TYPE_F32: @@ -2915,7 +2920,7 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * return nullptr; } - return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type]; + return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type][num_cols-1] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type][num_cols-1]; } static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) { @@ -3925,8 +3930,6 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& const uint64_t ne12 = src1->ne[2]; const uint64_t ne13 = src1->ne[3]; - GGML_ASSERT(ne11 == 1); - const uint64_t ne20 = dst->ne[0]; const uint64_t ne21 = dst->ne[1]; const uint64_t ne22 = dst->ne[2]; @@ -3935,6 +3938,11 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& const uint64_t r2 = ne12 / ne02; const uint64_t r3 = ne13 / ne03; + // batch_n indicates that we need to compute a few vector results, and this assumes + // ne12 and ne13 are 1. It overloads the batch_strides to hold the row strides. 
+ GGML_ASSERT(ne11 == 1 || ne12 * ne13 == 1); + bool batch_n = ne11 > 1; + ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context; ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context; ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; @@ -3985,7 +3993,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& } else { to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type); } - vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type); + vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type, ne11); GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT GGML_ASSERT(dmmv != nullptr); @@ -4057,8 +4065,10 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE }); } - uint32_t stride_batch_x = ne00*ne01; - uint32_t stride_batch_y = ne10*ne11; + // For batch_n, the A matrix is the same for each batch, and B/D use the row stride as the batch stride + uint32_t stride_batch_x = batch_n ? 0 : ne00*ne01; + uint32_t stride_batch_y = batch_n ? ne10 : (ne10*ne11); + uint32_t stride_batch_d = batch_n ? ne20 : (ne20*ne21); if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) { stride_batch_x = src0->nb[0] / ggml_type_size(src0->type); @@ -4081,7 +4091,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& // compute const vk_mat_vec_push_constants pc = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01, - stride_batch_x, stride_batch_y, (uint32_t)(ne20*ne21), + stride_batch_x, stride_batch_y, stride_batch_d, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3, }; ggml_vk_sync_buffers(subctx); @@ -4261,7 +4271,10 @@ static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, c } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1 && !ggml_is_permuted(src0) && !ggml_is_permuted(src1)) { ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst, dryrun); - } else if (dst->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) { + // mul_mat_vec supports batching ne12*ne13 when ne11==1, or treating ne11 as the batch size (up to four) + // when ne12 and ne13 are one. 
+ } else if ((dst->ne[1] == 1 || (dst->ne[1] <= mul_mat_vec_max_cols && src1->ne[2] * src1->ne[3] == 1)) && + (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) { ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst, dryrun); } else { ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, dryrun); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index 187c31916..24875cdcf 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -9,9 +9,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - #if !defined(DATA_A_F32) && !defined(DATA_A_F16) #define K_PER_ITER 8 #else @@ -21,70 +18,70 @@ layout (constant_id = 1) const uint NUM_ROWS = 1; uint a_offset, b_offset, d_offset, y_offset; -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - -void iter(inout FLOAT_TYPE temp[NUM_ROWS], const uint first_row, const uint num_rows, const uint tid, const uint i, bool lastiter) +void iter(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const uint first_row, const uint num_rows, const uint tid, const uint i, bool lastiter) { - const uint col = i*BLOCK_SIZE + K_PER_ITER*tid; - const uint iqs = (col%QUANT_K)/QUANT_R; // quant index - const uint iybs = col - col%QUANT_K; // y block start index + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + const uint col = i*BLOCK_SIZE + K_PER_ITER*tid; + const uint iqs = (col%QUANT_K)/QUANT_R; // quant index + const uint iybs = col - col%QUANT_K; // y block start index #if K_PER_ITER == 8 #if QUANT_R == 2 - const B_TYPE_VEC4 bv02 = data_b_v4[(b_offset + iybs + iqs) / 4]; - const B_TYPE_VEC4 bv13 = data_b_v4[(b_offset + iybs + iqs + y_offset) / 4]; - const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y); - const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w); + const B_TYPE_VEC4 bv02 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]; + const B_TYPE_VEC4 bv13 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs + y_offset) / 4]; + const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y); + const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w); #else - const vec4 bv0 = vec4(data_b_v4[(b_offset + iybs + iqs) / 4]); - const vec4 bv1 = vec4(data_b_v4[(b_offset + iybs + iqs) / 4 + 1]); + const vec4 bv0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]); + const vec4 bv1 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4 + 1]); #endif #else - // Check if the second of the pair of elements is OOB, and don't fetch B or - // accumulate it. We still fetch a pair of elements for A, which is fine for - // quantized formats since they'll be within the same block. We should - // probably skip fetching the second element for F16/F32, but as of now we - // still do. - const bool OOB = lastiter && (iybs + iqs + y_offset >= p.ncols); + // Check if the second of the pair of elements is OOB, and don't fetch B or + // accumulate it. We still fetch a pair of elements for A, which is fine for + // quantized formats since they'll be within the same block. We should + // probably skip fetching the second element for F16/F32, but as of now we + // still do. 
+ const bool OOB = lastiter && (iybs + iqs + y_offset >= p.ncols); - FLOAT_TYPE b0 = 0, b1 = 0; - b0 = FLOAT_TYPE(data_b[b_offset + iybs + iqs]); - if (!OOB) { - b1 = FLOAT_TYPE(data_b[b_offset + iybs + iqs + y_offset]); - } + FLOAT_TYPE b0 = 0, b1 = 0; + b0 = FLOAT_TYPE(data_b[j*p.batch_stride_b + b_offset + iybs + iqs]); + if (!OOB) { + b1 = FLOAT_TYPE(data_b[j*p.batch_stride_b + b_offset + iybs + iqs + y_offset]); + } #endif - uint ibi = first_row*p.ncols; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - const uint ib = (ibi + col)/QUANT_K; // block index - ibi += p.ncols; + uint ibi = first_row*p.ncols; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib = (ibi + col)/QUANT_K; // block index + ibi += p.ncols; #if K_PER_ITER == 8 - vec4 v = dequantize4(ib, iqs, a_offset); - vec4 v2 = dequantize4(ib, iqs+(4/QUANT_R), a_offset); + vec4 v = dequantize4(ib, iqs, a_offset); + vec4 v2 = dequantize4(ib, iqs+(4/QUANT_R), a_offset); - const vec2 dm = get_dm(ib, a_offset); - if (dm.y != 0) { // quant has min component - v = v * dm.x + dm.y; - v2 = v2 * dm.x + dm.y; - } + const vec2 dm = get_dm(ib, a_offset); + if (dm.y != 0) { // quant has min component + v = v * dm.x + dm.y; + v2 = v2 * dm.x + dm.y; + } - // matrix multiplication - FLOAT_TYPE rowtmp = dot(bv0, v); - rowtmp += dot(bv1, v2); + // matrix multiplication + FLOAT_TYPE rowtmp = dot(bv0, v); + rowtmp += dot(bv1, v2); - if (dm.y == 0) - rowtmp *= dm.x; + if (dm.y == 0) + rowtmp *= dm.x; - temp[n] += rowtmp; + temp[j][n] += rowtmp; #else - const vec2 v = dequantize(ib, iqs, a_offset); + const vec2 v = dequantize(ib, iqs, a_offset); - // matrix multiplication - temp[n] = fma(FLOAT_TYPE(v.x), b0, temp[n]); - if (!OOB) { - temp[n] = fma(FLOAT_TYPE(v.y), b1, temp[n]); - } + // matrix multiplication + temp[j][n] = fma(FLOAT_TYPE(v.x), b0, temp[j][n]); + if (!OOB) { + temp[j][n] = fma(FLOAT_TYPE(v.y), b1, temp[j][n]); + } #endif + } } } @@ -96,10 +93,12 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { y_offset = QUANT_R == 1 ? 
1 : QUANT_K/2; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } uint num_iters = p.ncols / (K_PER_ITER * BLOCK_SIZE); @@ -131,24 +130,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { i++; } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp index 3894fca82..903753c7e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp @@ -83,3 +83,36 @@ void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) { batch_idx * p.batch_stride_d; #endif } + +layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; +layout (constant_id = 2) const uint NUM_COLS = 1; + +shared FLOAT_TYPE tmpsh[NUM_COLS][NUM_ROWS][BLOCK_SIZE]; + +void reduce_result(const in FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) { + // sum up partial sums and write back result + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[j][n][tid] = temp[j][n]; + } + } + barrier(); + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { + if (tid < s) { + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[j][n][tid] += tmpsh[j][n][tid + s]; + } + } + } + barrier(); + } + if (tid == 0) { + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(tmpsh[j][n][0]); + } + } + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp index 138ad0184..934213446 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp @@ -5,11 +5,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -32,24 +27,17 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint s_offset = 8*v_im; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; 
i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; - B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; - B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; - B_TYPE_VEC2 b48 = data_b_v2[(b_offset + y_idx) / 2 + 24]; - B_TYPE_VEC2 b64 = data_b_v2[(b_offset + y_idx) / 2 + 32]; - B_TYPE_VEC2 b80 = data_b_v2[(b_offset + y_idx) / 2 + 40]; - B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; - B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; f16vec2 d = data_a[ib0 + i].d; @@ -74,48 +62,42 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uvec2 qs0 = uvec2(unpack8(qs0_u16)); uvec2 qs16 = uvec2(unpack8(qs16_u16)); - FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); - FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 2; ++l) { - sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), - fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), - fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), - fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), - fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), - fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), - fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3), - fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); - sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), - fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), - fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), - fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), - fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), - fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), - fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), - fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]; + B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]; + B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]; + B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]; + B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]; + B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]; + B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]; + B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]; + + FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); + FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] 
>> 6) & 3), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); + sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); + } + temp[j][n] = fma(dall, sum1, fma(-dmin, sum2, temp[j][n])); } - temp[n] = fma(dall, sum1, fma(-dmin, sum2, temp[n])); } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp index 82ec42d25..86b0159d9 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp @@ -5,11 +5,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -33,10 +28,12 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint q_offset = 32*v_im + l0; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } const uint s_shift = 4 * v_im; @@ -44,15 +41,6 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; - B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; - B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; - B_TYPE_VEC2 b48 = data_b_v2[(b_offset + y_idx) / 2 + 24]; - B_TYPE_VEC2 b64 = data_b_v2[(b_offset + y_idx) / 2 + 32]; - B_TYPE_VEC2 b80 = data_b_v2[(b_offset + y_idx) / 2 + 40]; - B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; - B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); @@ -70,39 +58,34 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { u8vec2 s8 = unpack8(s8_16); u8vec2 s10 = unpack8(s10_16); - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 2; ++l) { - sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) 
& 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum)))))))); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + + B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]; + B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]; + B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]; + B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]; + B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]; + B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]; + B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]; + B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]; + + FLOAT_TYPE sum = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 
0 : 4)), + fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum)))))))); + } + temp[j][n] = fma(d, sum, temp[j][n]); } - temp[n] = fma(d, sum, temp[n]); } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp index 677c207a8..cd1dd8e89 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp @@ -6,11 +6,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -36,21 +31,18 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - B_TYPE_VEC4 by10 = data_b_v4[(b_offset + y1_idx) / 4]; - B_TYPE_VEC4 by132 = data_b_v4[(b_offset + y1_idx) / 4 + 8]; - B_TYPE_VEC4 by20 = data_b_v4[(b_offset + y2_idx) / 4]; - B_TYPE_VEC4 by232 = data_b_v4[(b_offset + y2_idx) / 4 + 8]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; f16vec2 d = data_a[ib0 + i].d; 
@@ -103,37 +95,27 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint32_t q4_14 = qs64_hi4.z; const uint32_t q4_15 = qs64_hi4.w; - const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); - const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); - const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11))); - const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15))); - const FLOAT_TYPE smin = - fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7, - fma(FLOAT_TYPE(by10.y), sc2, fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7, - fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, fma(FLOAT_TYPE(by232.z), sc7, - fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7))))))))))))))); - temp[n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[n])); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC4 by10 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4]; + B_TYPE_VEC4 by132 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4 + 8]; + B_TYPE_VEC4 by20 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4]; + B_TYPE_VEC4 by232 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4 + 8]; + + const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); + const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); + const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11))); + const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15))); + const FLOAT_TYPE smin = + fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7, + fma(FLOAT_TYPE(by10.y), sc2, fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7, + fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, fma(FLOAT_TYPE(by232.z), sc7, + fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7))))))))))))))); + temp[j][n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[j][n])); + } } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git 
a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp index ed3c25d89..0a68891c3 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp @@ -6,11 +6,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -33,25 +28,18 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - B_TYPE_VEC2 by10 = data_b_v2[(b_offset + y1_idx) / 2]; - B_TYPE_VEC2 by116 = data_b_v2[(b_offset + y1_idx) / 2 + 8]; - B_TYPE_VEC2 by132 = data_b_v2[(b_offset + y1_idx) / 2 + 16]; - B_TYPE_VEC2 by148 = data_b_v2[(b_offset + y1_idx) / 2 + 24]; - B_TYPE_VEC2 by20 = data_b_v2[(b_offset + y2_idx) / 2]; - B_TYPE_VEC2 by216 = data_b_v2[(b_offset + y2_idx) / 2 + 8]; - B_TYPE_VEC2 by232 = data_b_v2[(b_offset + y2_idx) / 2 + 16]; - B_TYPE_VEC2 by248 = data_b_v2[(b_offset + y2_idx) / 2 + 24]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; f16vec2 d = data_a[ib0 + i].d; @@ -116,53 +104,47 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint32_t q4_14 = qs64_80_hi4.z; const uint32_t q4_15 = qs64_80_hi4.w; - const FLOAT_TYPE sx = - fma(FLOAT_TYPE(by10.x), q4_0, - fma(FLOAT_TYPE(by10.y), q4_1, - fma(FLOAT_TYPE(by116.x), q4_2, - FLOAT_TYPE(by116.y) * q4_3))); - const FLOAT_TYPE sy = - fma(FLOAT_TYPE(by132.x), q4_4, - fma(FLOAT_TYPE(by132.y), q4_5, - fma(FLOAT_TYPE(by148.x), q4_6, - FLOAT_TYPE(by148.y) * q4_7))); - const FLOAT_TYPE sz = - fma(FLOAT_TYPE(by20.x), q4_8, - fma(FLOAT_TYPE(by20.y), q4_9, - fma(FLOAT_TYPE(by216.x), q4_10, - FLOAT_TYPE(by216.y) * q4_11))); - const FLOAT_TYPE sw = - fma(FLOAT_TYPE(by232.x), q4_12, - fma(FLOAT_TYPE(by232.y), q4_13, - fma(FLOAT_TYPE(by248.x), q4_14, - FLOAT_TYPE(by248.y) * q4_15))); - const FLOAT_TYPE smin = - fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, - fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, - fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, - (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); - temp[n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[n])); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC2 by10 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2]; + B_TYPE_VEC2 by116 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 8]; + B_TYPE_VEC2 by132 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 16]; + B_TYPE_VEC2 by148 = 
data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 24]; + B_TYPE_VEC2 by20 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2]; + B_TYPE_VEC2 by216 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 8]; + B_TYPE_VEC2 by232 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 16]; + B_TYPE_VEC2 by248 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 24]; + + const FLOAT_TYPE sx = + fma(FLOAT_TYPE(by10.x), q4_0, + fma(FLOAT_TYPE(by10.y), q4_1, + fma(FLOAT_TYPE(by116.x), q4_2, + FLOAT_TYPE(by116.y) * q4_3))); + const FLOAT_TYPE sy = + fma(FLOAT_TYPE(by132.x), q4_4, + fma(FLOAT_TYPE(by132.y), q4_5, + fma(FLOAT_TYPE(by148.x), q4_6, + FLOAT_TYPE(by148.y) * q4_7))); + const FLOAT_TYPE sz = + fma(FLOAT_TYPE(by20.x), q4_8, + fma(FLOAT_TYPE(by20.y), q4_9, + fma(FLOAT_TYPE(by216.x), q4_10, + FLOAT_TYPE(by216.y) * q4_11))); + const FLOAT_TYPE sw = + fma(FLOAT_TYPE(by232.x), q4_12, + fma(FLOAT_TYPE(by232.y), q4_13, + fma(FLOAT_TYPE(by248.x), q4_14, + FLOAT_TYPE(by248.y) * q4_15))); + const FLOAT_TYPE smin = + fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, + fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, + fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, + (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); + temp[j][n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[j][n])); + } } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp index fab4ff5ff..70e13a56b 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp @@ -6,11 +6,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -36,20 +31,17 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint s_offset = 8*v_im + is; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - B_TYPE_VEC4 by0 = data_b_v4[(b_offset + y_idx) / 4]; - B_TYPE_VEC4 by32 = data_b_v4[(b_offset + y_idx) / 4 + 8]; - B_TYPE_VEC4 by64 = data_b_v4[(b_offset + y_idx) / 4 + 16]; - B_TYPE_VEC4 by96 = 
data_b_v4[(b_offset + y_idx) / 4 + 24]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); @@ -84,35 +76,25 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uvec4 q2 = uvec4(unpack8(q2_u32)); uvec4 q3 = uvec4(unpack8(q3_u32)); - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 4; ++l) { - sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32), - fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32), - fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32), - fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum)))); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC4 by0 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4]; + B_TYPE_VEC4 by32 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 8]; + B_TYPE_VEC4 by64 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 16]; + B_TYPE_VEC4 by96 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 24]; + + FLOAT_TYPE sum = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 4; ++l) { + sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32), + fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32), + fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32), + fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum)))); + } + temp[j][n] += sum * d; } - temp[n] += sum * d; } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index c79acffd2..1e892f663 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -3937,7 +3937,7 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1})); - for (int bs : {1, 512}) { + for (int bs : {1, 2, 3, 4, 5, 8, 512}) { for (ggml_type type_a : all_types) { for (ggml_type type_b : {GGML_TYPE_F32}) { test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, {1, 1}, {1, 1})); From 6e1531aca5ed17f078973b4700fcdadbda4a34a5 Mon Sep 17 00:00:00 2001 From: Peter Date: Tue, 31 Dec 2024 11:46:06 +1100 Subject: [PATCH 26/81] common, examples, ggml : fix MSYS2 GCC compiler errors and warnings when building with LLAMA_CURL=ON and GGML_OPENCL=ON (#11013) In common/common.cpp: * Convert usage of stat() function call to check if file exists to standard library function std::filesystem::exists (error unable to match to correct function signature) * Additional conditions to check if PATH_MAX is already defined in WIN32 environment (warning it is already defined in MSYS2) In examples/run/run.cpp: * Add io.h header inclusion (error cannot find function _get_osfhandle) * Change initialisers for OVERLAPPED to empty struct (warning about uninitialised members) * Add initialiser for hFile (warning 
it may be uninitialised) * Add cast for curl_off_t percentage value to long int in generate_progress_prefix function (warning that curl_off_t is long long int) In ggml/src/ggml-opencl/ggml-opencl.cpp: * Initialise certain declared cl_mem variables to nullptr for greater safety (warning about B_d variable possibly used unassigned) --- common/common.cpp | 8 +++++--- examples/run/run.cpp | 9 +++++---- ggml/src/ggml-opencl/ggml-opencl.cpp | 12 ++++++------ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 20be92911..9071999a7 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -62,7 +63,9 @@ #ifdef __linux__ #include #elif defined(_WIN32) -#define PATH_MAX MAX_PATH +# if !defined(PATH_MAX) +# define PATH_MAX MAX_PATH +# endif #else #include #endif @@ -1148,8 +1151,7 @@ static bool common_download_file(const std::string & url, const std::string & pa #endif // Check if the file already exists locally - struct stat model_file_info; - auto file_exists = (stat(path.c_str(), &model_file_info) == 0); + auto file_exists = std::filesystem::exists(path); // If the file exists, check its JSON metadata companion file. std::string metadata_path = path + ".json"; diff --git a/examples/run/run.cpp b/examples/run/run.cpp index f89d041c4..75b817272 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -1,5 +1,6 @@ #if defined(_WIN32) # include +# include #else # include # include @@ -253,7 +254,7 @@ class File { return 1; } - OVERLAPPED overlapped = { 0 }; + OVERLAPPED overlapped = {}; if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD, &overlapped)) { fd = -1; @@ -277,7 +278,7 @@ class File { if (fd >= 0) { # ifdef _WIN32 if (hFile != INVALID_HANDLE_VALUE) { - OVERLAPPED overlapped = { 0 }; + OVERLAPPED overlapped = {}; UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped); } # else @@ -293,7 +294,7 @@ class File { private: int fd = -1; # ifdef _WIN32 - HANDLE hFile; + HANDLE hFile = nullptr; # endif }; @@ -464,7 +465,7 @@ class HttpClient { return (now_downloaded_plus_file_size * 100) / total_to_download; } - static std::string generate_progress_prefix(curl_off_t percentage) { return fmt("%3ld%% |", percentage); } + static std::string generate_progress_prefix(curl_off_t percentage) { return fmt("%3ld%% |", static_cast(percentage)); } static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) { const auto now = std::chrono::steady_clock::now(); diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index c77d629f0..ed90e471a 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -2744,13 +2744,13 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co cl_image_format img_fmt_1d; cl_image_desc img_desc_1d; cl_buffer_region region; - cl_mem A_image1d; - cl_mem B_image1d; - cl_mem B_sub_buffer; - cl_mem C_d; + cl_mem A_image1d = nullptr; + cl_mem B_image1d = nullptr; + cl_mem B_sub_buffer = nullptr; + cl_mem C_d = nullptr; // for B transpose - cl_mem B_d; - cl_mem B_d_input_image; + cl_mem B_d = nullptr; + cl_mem B_d_input_image = nullptr; // <--------------------------------------------> // // define matrix dimensions From bc7b1f86324279a3dabb705c04ad754a2b27df16 Mon Sep 17 00:00:00 2001 From: ymcki <84055651+ymcki@users.noreply.github.com> Date: Tue, 31 
Dec 2024 19:04:48 +0800 Subject: [PATCH 27/81] convert : fix Llama-3_1-Nemotron-51B rope settings (#11008) * conflict resolution * move comments after bracket to its own line * DeciLMCausalModel now reads rope_theta from config.json properly --- convert_hf_to_gguf.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index b6c15da94..4e6c0f60c 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1764,25 +1764,19 @@ class DeciModel(Model): self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_types(toktypes) - special_vocab = gguf.SpecialVocab( - self.dir_model, load_merges=True, - special_token_types = ['bos', 'eos', 'eom', 'eot'] - ) - special_vocab._set_special_token("bos", 128000) - special_vocab._set_special_token("eos", 128001) - special_vocab._set_special_token("eom", 128008) - special_vocab._set_special_token("eot", 128009) + special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True) special_vocab.add_to_gguf(self.gguf_writer) else: # DeciLM-7B self._set_vocab_llama_hf() -# self._set_vocab_gpt2() def set_gguf_parameters(self): if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B assert self.block_count == len(self._num_kv_heads) assert self.block_count == len(self._num_heads) assert self.block_count == len(self._ffn_dims) + if (rope_theta := self.hparams.get("rope_theta")) is not None: + self.gguf_writer.add_rope_freq_base(rope_theta) self.gguf_writer.add_head_count_kv(self._num_kv_heads) self.gguf_writer.add_head_count(self._num_heads) self.gguf_writer.add_feed_forward_length(self._ffn_dims) From 5896c65232c7dc87d78426956b16f63fbf58dcf6 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Tue, 31 Dec 2024 12:34:13 +0100 Subject: [PATCH 28/81] server : add OAI compat for /v1/completions (#10974) * server : add OAI compat for /v1/completions * add test * add docs * better docs --- examples/server/README.md | 252 +++++++++++------- examples/server/server.cpp | 206 ++++++++++---- .../server/tests/unit/test_chat_completion.py | 6 +- examples/server/tests/unit/test_completion.py | 35 +++ examples/server/utils.hpp | 47 +++- 5 files changed, 400 insertions(+), 146 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index 07436057a..bcef81946 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -345,7 +345,7 @@ node index.js > [!IMPORTANT] > -> This endpoint is **not** OAI-compatible +> This endpoint is **not** OAI-compatible. For OAI-compatible client, use `/v1/completions` instead. *Options:* @@ -523,6 +523,7 @@ These words will not be included in the completion, so make sure to add them to - `tokens_evaluated`: Number of tokens evaluated in total from the prompt - `truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`) + ### POST `/tokenize`: Tokenize a given text *Options:* @@ -574,6 +575,10 @@ With input 'á' (utf8 hex: C3 A1) on tinyllama/stories260k ### POST `/embedding`: Generate embedding of a given text +> [!IMPORTANT] +> +> This endpoint is **not** OAI-compatible. For OAI-compatible client, use `/v1/embeddings` instead. + The same as [the embedding example](../embedding) does. 
*Options:* @@ -744,96 +749,6 @@ To use this endpoint with POST method, you need to start server with `--props` - None yet -### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API - -Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used. - -*Options:* - -See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. - -The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. - -*Examples:* - -You can use either Python `openai` library with appropriate checkpoints: - -```python -import openai - -client = openai.OpenAI( - base_url="http://localhost:8080/v1", # "http://:port" - api_key = "sk-no-key-required" -) - -completion = client.chat.completions.create( -model="gpt-3.5-turbo", -messages=[ - {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, - {"role": "user", "content": "Write a limerick about python exceptions"} -] -) - -print(completion.choices[0].message) -``` - -... or raw HTTP requests: - -```shell -curl http://localhost:8080/v1/chat/completions \ --H "Content-Type: application/json" \ --H "Authorization: Bearer no-key" \ --d '{ -"model": "gpt-3.5-turbo", -"messages": [ -{ - "role": "system", - "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." -}, -{ - "role": "user", - "content": "Write a limerick about python exceptions" -} -] -}' -``` - -### POST `/v1/embeddings`: OpenAI-compatible embeddings API - -This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. - -*Options:* - -See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). 
- -*Examples:* - -- input as string - - ```shell - curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "input": "hello", - "model":"GPT-4", - "encoding_format": "float" - }' - ``` - -- `input` as string array - - ```shell - curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "input": ["hello", "world"], - "model":"GPT-4", - "encoding_format": "float" - }' - ``` - ### POST `/embeddings`: non-OpenAI-compatible embeddings API This endpoint supports all poolings, including `--pooling none`. When the pooling is `none`, the responses will contain the *unnormalized* embeddings for *all* input tokens. For all other pooling types, only the pooled embeddings are returned, normalized using Euclidian norm. @@ -1064,6 +979,161 @@ To know the `id` of the adapter, use GET `/lora-adapters` ] ``` +## OpenAI-compatible API Endpoints + +### GET `/v1/models`: OpenAI-compatible Model Info API + +Returns information about the loaded model. See [OpenAI Models API documentation](https://platform.openai.com/docs/api-reference/models). + +The returned list always has one single element. + +By default, model `id` field is the path to model file, specified via `-m`. You can set a custom value for model `id` field via `--alias` argument. For example, `--alias gpt-4o-mini`. + +Example: + +```json +{ + "object": "list", + "data": [ + { + "id": "../models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "object": "model", + "created": 1735142223, + "owned_by": "llamacpp", + "meta": { + "vocab_type": 2, + "n_vocab": 128256, + "n_ctx_train": 131072, + "n_embd": 4096, + "n_params": 8030261312, + "size": 4912898304 + } + } + ] +} +``` + +### POST `/v1/completions`: OpenAI-compatible Completions API + +Given an input `prompt`, it returns the predicted completion. Streaming mode is also supported. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. + +*Options:* + +See [OpenAI Completions API documentation](https://platform.openai.com/docs/api-reference/completions). + +llama.cpp `/completion`-specific features such as `mirostat` are supported. + +*Examples:* + +Example usage with `openai` python library: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +completion = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8 +) + +print(completion.choices[0].text) +``` + +### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API + +Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used. + +*Options:* + +See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. 
+ +The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. + +*Examples:* + +You can use either Python `openai` library with appropriate checkpoints: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +completion = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, + {"role": "user", "content": "Write a limerick about python exceptions"} + ] +) + +print(completion.choices[0].message) +``` + +... or raw HTTP requests: + +```shell +curl http://localhost:8080/v1/chat/completions \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer no-key" \ +-d '{ +"model": "gpt-3.5-turbo", +"messages": [ +{ + "role": "system", + "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." +}, +{ + "role": "user", + "content": "Write a limerick about python exceptions" +} +] +}' +``` + +### POST `/v1/embeddings`: OpenAI-compatible embeddings API + +This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. + +*Options:* + +See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). 
+ +*Examples:* + +- input as string + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": "hello", + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + +- `input` as string array + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": ["hello", "world"], + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + ## More examples ### Interactive mode diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 3558ddb7c..1d00954a2 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -67,6 +67,13 @@ enum server_task_type { SERVER_TASK_TYPE_SET_LORA, }; +enum oaicompat_type { + OAICOMPAT_TYPE_NONE, + OAICOMPAT_TYPE_CHAT, + OAICOMPAT_TYPE_COMPLETION, + OAICOMPAT_TYPE_EMBEDDING, +}; + // https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11 enum error_type { ERROR_TYPE_INVALID_REQUEST, @@ -101,11 +108,10 @@ struct slot_params { struct common_params_speculative speculative; // OAI-compat fields - bool verbose = false; - bool oaicompat = false; - bool oaicompat_chat = true; - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; json to_json() const { std::vector samplers; @@ -529,11 +535,10 @@ struct server_task_result_cmpl_final : server_task_result { slot_params generation_params; // OAI-compat fields - bool verbose = false; - bool oaicompat = false; - bool oaicompat_chat = true; // TODO: support oaicompat for non-chat - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; virtual int get_index() override { return index; @@ -544,9 +549,16 @@ struct server_task_result_cmpl_final : server_task_result { } virtual json to_json() override { - return oaicompat - ? (stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat()) - : to_json_non_oaicompat(); + switch (oaicompat) { + case OAICOMPAT_TYPE_NONE: + return to_json_non_oaicompat(); + case OAICOMPAT_TYPE_COMPLETION: + return to_json_oaicompat(); + case OAICOMPAT_TYPE_CHAT: + return stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat(); + default: + GGML_ASSERT(false && "Invalid oaicompat_type"); + } } json to_json_non_oaicompat() { @@ -574,6 +586,50 @@ struct server_task_result_cmpl_final : server_task_result { return response_fields.empty() ? res : json_get_nested_values(response_fields, res); } + json to_json_oaicompat() { + std::time_t t = std::time(0); + json logprobs = json(nullptr); // OAI default to null + if (!stream && probs_output.size() > 0) { + logprobs = json{ + {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)}, + }; + } + json finish_reason = "length"; + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + finish_reason = "stop"; + } + json res = json { + {"choices", json::array({ + json{ + {"text", stream ? 
"" : content}, // in stream mode, content is already in last partial chunk + {"index", index}, + {"logprobs", logprobs}, + {"finish_reason", finish_reason}, + } + })}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "text_completion"}, + {"usage", json { + {"completion_tokens", n_decoded}, + {"prompt_tokens", n_prompt_tokens}, + {"total_tokens", n_decoded + n_prompt_tokens} + }}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + + return res; + } + json to_json_oaicompat_chat() { std::string finish_reason = "length"; if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { @@ -671,11 +727,10 @@ struct server_task_result_cmpl_partial : server_task_result { result_timings timings; // OAI-compat fields - bool verbose = false; - bool oaicompat = false; - bool oaicompat_chat = true; // TODO: support oaicompat for non-chat - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; virtual int get_index() override { return index; @@ -686,7 +741,16 @@ struct server_task_result_cmpl_partial : server_task_result { } virtual json to_json() override { - return oaicompat ? to_json_oaicompat() : to_json_non_oaicompat(); + switch (oaicompat) { + case OAICOMPAT_TYPE_NONE: + return to_json_non_oaicompat(); + case OAICOMPAT_TYPE_COMPLETION: + return to_json_oaicompat(); + case OAICOMPAT_TYPE_CHAT: + return to_json_oaicompat_chat(); + default: + GGML_ASSERT(false && "Invalid oaicompat_type"); + } } json to_json_non_oaicompat() { @@ -711,6 +775,41 @@ struct server_task_result_cmpl_partial : server_task_result { } json to_json_oaicompat() { + std::time_t t = std::time(0); + json logprobs = json(nullptr); // OAI default to null + if (prob_output.probs.size() > 0) { + logprobs = json{ + {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)}, + }; + } + json res = json { + {"choices", json::array({ + json{ + {"text", content}, + {"index", index}, + {"logprobs", logprobs}, + {"finish_reason", nullptr}, + } + })}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "text_completion"}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + + return res; + } + + json to_json_oaicompat_chat() { bool first = n_decoded == 0; std::time_t t = std::time(0); json choices; @@ -789,14 +888,16 @@ struct server_task_result_embd : server_task_result { int32_t n_tokens; // OAI-compat fields - bool oaicompat = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; virtual int get_index() override { return index; } virtual json to_json() override { - return oaicompat ? to_json_oaicompat() : to_json_non_oaicompat(); + return oaicompat == OAICOMPAT_TYPE_EMBEDDING + ? 
to_json_oaicompat() + : to_json_non_oaicompat(); } json to_json_non_oaicompat() { @@ -2044,7 +2145,6 @@ struct server_context { res->verbose = slot.params.verbose; res->oaicompat = slot.params.oaicompat; - res->oaicompat_chat = slot.params.oaicompat_chat; res->oaicompat_model = slot.params.oaicompat_model; res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; @@ -2085,7 +2185,6 @@ struct server_context { res->verbose = slot.params.verbose; res->stream = slot.params.stream; res->oaicompat = slot.params.oaicompat; - res->oaicompat_chat = slot.params.oaicompat_chat; res->oaicompat_model = slot.params.oaicompat_model; res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; @@ -3506,12 +3605,11 @@ int main(int argc, char ** argv) { // handle completion-like requests (completion, chat, infill) // we can optionally provide a custom format for partial results and final results - const auto handle_completions_generic = [&ctx_server, &res_error, &res_ok]( + const auto handle_completions_impl = [&ctx_server, &res_error, &res_ok]( server_task_type type, json & data, httplib::Response & res, - bool oaicompat = false, - bool oaicompat_chat = false) { + oaicompat_type oaicompat) { GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL); if (ctx_server.params_base.embedding) { @@ -3536,9 +3634,8 @@ int main(int argc, char ** argv) { task.id_selected_slot = json_value(data, "id_slot", -1); // OAI-compat - task.params.oaicompat = oaicompat; - task.params.oaicompat_chat = oaicompat_chat; - task.params.oaicompat_cmpl_id = completion_id; + task.params.oaicompat = oaicompat; + task.params.oaicompat_cmpl_id = completion_id; // oaicompat_model is already populated by params_from_json_cmpl tasks.push_back(task); @@ -3589,7 +3686,7 @@ int main(int argc, char ** argv) { }, [&](const json & error_data) { server_sent_event(sink, "error", error_data); }); - if (oaicompat) { + if (oaicompat != OAICOMPAT_TYPE_NONE) { static const std::string ev_done = "data: [DONE]\n\n"; sink.write(ev_done.data(), ev_done.size()); } @@ -3605,17 +3702,25 @@ int main(int argc, char ** argv) { } }; - const auto handle_completions = [&handle_completions_generic](const httplib::Request & req, httplib::Response & res) { + const auto handle_completions = [&handle_completions_impl](const httplib::Request & req, httplib::Response & res) { json data = json::parse(req.body); - return handle_completions_generic( + return handle_completions_impl( SERVER_TASK_TYPE_COMPLETION, data, res, - /* oaicompat */ false, - /* oaicompat_chat */ false); + OAICOMPAT_TYPE_NONE); }; - const auto handle_infill = [&ctx_server, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) { + const auto handle_completions_oai = [&handle_completions_impl](const httplib::Request & req, httplib::Response & res) { + json data = oaicompat_completion_params_parse(json::parse(req.body)); + return handle_completions_impl( + SERVER_TASK_TYPE_COMPLETION, + data, + res, + OAICOMPAT_TYPE_COMPLETION); + }; + + const auto handle_infill = [&ctx_server, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { // check model compatibility std::string err; if (llama_token_fim_pre(ctx_server.model) == LLAMA_TOKEN_NULL) { @@ -3684,22 +3789,25 @@ int main(int argc, char ** argv) { tokenized_prompts[0] ); - return handle_completions_generic(SERVER_TASK_TYPE_INFILL, data, res); + return handle_completions_impl( + SERVER_TASK_TYPE_INFILL, + data, + res, + OAICOMPAT_TYPE_NONE); // infill is 
not OAI compatible }; - const auto handle_chat_completions = [&ctx_server, ¶ms, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) { + const auto handle_chat_completions = [&ctx_server, ¶ms, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { if (ctx_server.params_base.embedding) { res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); return; } - json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template); - return handle_completions_generic( + json data = oaicompat_chat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template); + return handle_completions_impl( SERVER_TASK_TYPE_COMPLETION, data, res, - /* oaicompat */ true, - /* oaicompat_chat */ true); + OAICOMPAT_TYPE_CHAT); }; const auto handle_models = [¶ms, &ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) { @@ -3772,10 +3880,10 @@ int main(int argc, char ** argv) { res_ok(res, data); }; - const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, bool oaicompat) { + const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, oaicompat_type oaicompat) { const json body = json::parse(req.body); - if (oaicompat && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) { + if (oaicompat != OAICOMPAT_TYPE_NONE && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) { res_error(res, format_error_response("Pooling type 'none' is not OAI compatible. Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST)); return; } @@ -3785,7 +3893,7 @@ int main(int argc, char ** argv) { if (body.count("input") != 0) { prompt = body.at("input"); } else if (body.contains("content")) { - oaicompat = false; + oaicompat = OAICOMPAT_TYPE_NONE; // "content" field is not OAI compatible prompt = body.at("content"); } else { res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST)); @@ -3854,16 +3962,18 @@ int main(int argc, char ** argv) { } // write JSON response - json root = oaicompat ? format_embeddings_response_oaicompat(body, responses, use_base64) : json(responses); + json root = oaicompat == OAICOMPAT_TYPE_EMBEDDING + ? 
format_embeddings_response_oaicompat(body, responses, use_base64) + : json(responses); res_ok(res, root); }; const auto handle_embeddings = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) { - handle_embeddings_impl(req, res, false); + handle_embeddings_impl(req, res, OAICOMPAT_TYPE_NONE); }; const auto handle_embeddings_oai = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) { - handle_embeddings_impl(req, res, true); + handle_embeddings_impl(req, res, OAICOMPAT_TYPE_EMBEDDING); }; const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) { @@ -4033,7 +4143,7 @@ int main(int argc, char ** argv) { svr->Get ("/v1/models", handle_models); // public endpoint (no API key check) svr->Post("/completion", handle_completions); // legacy svr->Post("/completions", handle_completions); - svr->Post("/v1/completions", handle_completions); + svr->Post("/v1/completions", handle_completions_oai); svr->Post("/chat/completions", handle_chat_completions); svr->Post("/v1/chat/completions", handle_chat_completions); svr->Post("/infill", handle_infill); diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 885497081..130da03a1 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -83,7 +83,7 @@ def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_conte def test_chat_completion_with_openai_library(): global server server.start() - client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") res = client.chat.completions.create( model="gpt-3.5-turbo-instruct", messages=[ @@ -170,7 +170,7 @@ def test_chat_completion_with_timings_per_token(): def test_logprobs(): global server server.start() - client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") res = client.chat.completions.create( model="gpt-3.5-turbo-instruct", temperature=0.0, @@ -197,7 +197,7 @@ def test_logprobs(): def test_logprobs_stream(): global server server.start() - client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") res = client.chat.completions.create( model="gpt-3.5-turbo-instruct", temperature=0.0, diff --git a/examples/server/tests/unit/test_completion.py b/examples/server/tests/unit/test_completion.py index a6b215944..e5e3b6077 100644 --- a/examples/server/tests/unit/test_completion.py +++ b/examples/server/tests/unit/test_completion.py @@ -1,5 +1,6 @@ import pytest import time +from openai import OpenAI from utils import * server = ServerPreset.tinyllama2() @@ -85,6 +86,40 @@ def test_completion_stream_vs_non_stream(): assert content_stream == res_non_stream.body["content"] +def test_completion_stream_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8, + ) + assert res.system_fingerprint is not None and 
res.system_fingerprint.startswith("b") + assert res.choices[0].finish_reason == "length" + assert res.choices[0].text is not None + assert match_regex("(going|bed)+", res.choices[0].text) + + +def test_completion_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8, + stream=True, + ) + output_text = '' + for data in res: + choice = data.choices[0] + if choice.finish_reason is None: + assert choice.text is not None + output_text += choice.text + assert match_regex("(going|bed)+", output_text) + + @pytest.mark.parametrize("n_slots", [1, 2]) def test_consistent_result_same_seed(n_slots: int): global server diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 334f2f192..8523d4787 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -549,10 +549,49 @@ static bool server_sent_event(httplib::DataSink & sink, const char * event, cons // OAI utils // -static json oaicompat_completion_params_parse( - const struct llama_model * model, - const json & body, /* openai api json semantics */ - const std::string & chat_template) { +static json oaicompat_completion_params_parse(const json & body) { + json llama_params; + + if (!body.contains("prompt")) { + throw std::runtime_error("\"prompt\" is required"); + } + + // Handle "stop" field + if (body.contains("stop") && body.at("stop").is_string()) { + llama_params["stop"] = json::array({body.at("stop").get()}); + } else { + llama_params["stop"] = json_value(body, "stop", json::array()); + } + + // Handle "n" field + int n_choices = json_value(body, "n", 1); + if (n_choices != 1) { + throw std::runtime_error("Only one completion choice is allowed"); + } + + // Params supported by OAI but unsupported by llama.cpp + static const std::vector unsupported_params { "best_of", "echo", "suffix" }; + for (const auto & param : unsupported_params) { + if (body.contains(param)) { + throw std::runtime_error("Unsupported param: " + param); + } + } + + // Copy remaining properties to llama_params + for (const auto & item : body.items()) { + // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens" + if (!llama_params.contains(item.key()) || item.key() == "n_predict") { + llama_params[item.key()] = item.value(); + } + } + + return llama_params; +} + +static json oaicompat_chat_completion_params_parse( + const struct llama_model * model, + const json & body, /* openai api json semantics */ + const std::string & chat_template) { json llama_params; // Apply chat template to the list of messages From 45095a61bfd164e87563a0dc0fbd7b0e9891590b Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Tue, 31 Dec 2024 15:22:01 +0100 Subject: [PATCH 29/81] server : clean up built-in template detection (#11026) * server : clean up built-in template detection * fix compilation * add chat template test * fix condition --- common/common.cpp | 12 ++++++++++ common/common.h | 3 +++ examples/server/server.cpp | 23 ++++++++----------- .../server/tests/unit/test_chat_completion.py | 17 ++++++++++++++ examples/server/tests/utils.py | 3 +++ examples/server/utils.hpp | 13 ----------- 6 files changed, 44 insertions(+), 27 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 9071999a7..fe923fce6 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1614,6 +1614,18 @@ std::string 
common_detokenize(llama_context * ctx, const std::vector 0) { + std::vector model_template(res + 1, 0); + llama_model_meta_val_str(model, template_key, model_template.data(), model_template.size()); + return std::string(model_template.data(), model_template.size() - 1); + } + return ""; +} + bool common_chat_verify_template(const std::string & tmpl) { llama_chat_message chat[] = {{"user", "test"}}; int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0); diff --git a/common/common.h b/common/common.h index 1d2bd932c..589f65d09 100644 --- a/common/common.h +++ b/common/common.h @@ -571,6 +571,9 @@ struct common_chat_msg { std::string content; }; +// Get the built-in chat template for the model. Return empty string if not present. +std::string common_get_builtin_chat_template(const struct llama_model * model); + // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid bool common_chat_verify_template(const std::string & tmpl); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 1d00954a2..b3773f276 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1724,17 +1724,10 @@ struct server_context { return true; } - bool validate_model_chat_template() const { - std::vector model_template(2048, 0); // longest known template is about 1200 bytes - std::string template_key = "tokenizer.chat_template"; - int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size()); - if (res >= 0) { - llama_chat_message chat[] = {{"user", "test"}}; - std::string tmpl = std::string(model_template.data(), model_template.size()); - int32_t chat_res = llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0); - return chat_res > 0; - } - return false; + bool validate_builtin_chat_template() const { + llama_chat_message chat[] = {{"user", "test"}}; + int32_t chat_res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0); + return chat_res > 0; } void init() { @@ -3583,7 +3576,7 @@ int main(int argc, char ** argv) { { "default_generation_settings", ctx_server.default_generation_settings_for_props }, { "total_slots", ctx_server.params_base.n_parallel }, { "model_path", ctx_server.params_base.model }, - { "chat_template", llama_get_chat_template(ctx_server.model) }, + { "chat_template", common_get_builtin_chat_template(ctx_server.model) }, { "build_info", build_info }, }; @@ -4223,14 +4216,16 @@ int main(int argc, char ** argv) { // if a custom chat template is not supplied, we will use the one that comes with the model (if any) if (params.chat_template.empty()) { - if (!ctx_server.validate_model_chat_template()) { + if (!ctx_server.validate_builtin_chat_template()) { LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__); params.chat_template = "chatml"; } } // print sample chat example to make it clear which template is used - LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str()); + LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__, + params.chat_template.empty() ? 
"(built-in)" : params.chat_template.c_str(), + common_chat_format_example(ctx_server.model, params.chat_template).c_str()); ctx_server.queue_tasks.on_new_task(std::bind( &server_context::process_single_task, &ctx_server, std::placeholders::_1)); diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 130da03a1..b15dba6eb 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -100,6 +100,23 @@ def test_chat_completion_with_openai_library(): assert match_regex("(Suddenly)+", res.choices[0].message.content) +def test_chat_template(): + global server + server.chat_template = "llama3" + server.debug = True # to get the "__verbose" object in the response + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 8, + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ] + }) + assert res.status_code == 200 + assert "__verbose" in res.body + assert res.body["__verbose"]["prompt"] == " <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" + + @pytest.mark.parametrize("response_format,n_predicted,re_content", [ ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""), ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"), diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 277125e88..359bb0fae 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -74,6 +74,7 @@ class ServerProcess: draft_min: int | None = None draft_max: int | None = None no_webui: bool | None = None + chat_template: str | None = None # session variables process: subprocess.Popen | None = None @@ -164,6 +165,8 @@ class ServerProcess: server_args.extend(["--draft-min", self.draft_min]) if self.no_webui: server_args.append("--no-webui") + if self.chat_template: + server_args.extend(["--chat-template", self.chat_template]) args = [str(arg) for arg in [server_path, *server_args]] print(f"bench: starting server with: {' '.join(args)}") diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 8523d4787..70220c437 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -382,19 +382,6 @@ inline std::string format_chat(const struct llama_model * model, const std::stri return formatted_chat; } -static std::string llama_get_chat_template(const struct llama_model * model) { - std::string template_key = "tokenizer.chat_template"; - // call with NULL buffer to get the total size of the string - int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0); - if (res < 2) { - return ""; - } else { - std::vector model_template(res + 1, 0); - llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size()); - return std::string(model_template.data(), model_template.size() - 1); - } -} - // // base64 utils (TODO: move to common in the future) // From 0827b2c1da299805288abbd556d869318f2b121e Mon Sep 17 00:00:00 2001 From: Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com> Date: Tue, 31 Dec 2024 19:53:33 +0530 Subject: [PATCH 30/81] ggml : fixes for AVXVNNI instruction set with MSVC and Clang (#11027) * Fixes for clang AVX VNNI * enable AVX VNNI and alder lake build for MSVC * Apply suggestions from 
code review --------- Co-authored-by: slaren --- ggml/src/CMakeLists.txt | 4 ++-- ggml/src/ggml-cpu/CMakeLists.txt | 3 +-- ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp | 5 ++++- ggml/src/ggml-cpu/ggml-cpu-quants.c | 6 +++++- ggml/src/ggml-cpu/llamafile/sgemm.cpp | 4 +++- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index a5f7f7b5b..84101c32c 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -290,9 +290,9 @@ if (GGML_CPU_ALL_VARIANTS) ggml_add_cpu_backend_variant(haswell AVX F16C AVX2 FMA) ggml_add_cpu_backend_variant(skylakex AVX F16C AVX2 FMA AVX512) ggml_add_cpu_backend_variant(icelake AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 FMA AVX_VNNI) if (NOT MSVC) - # MSVC doesn't support AVX-VNNI or AMX - ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 FMA AVX_VNNI) + # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() else () diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index f0aecac1b..6b3641c42 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -215,8 +215,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_DEFINITIONS GGML_SSE42) endif() if (GGML_AVX_VNNI) - # MSVC generates AVX512 with AVX-VNNI intrinsics even with /arch:AVX2 - #list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) + list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) endif() else () if (GGML_NATIVE) diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp index 2d79b8b61..622c63f1f 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp @@ -194,9 +194,12 @@ static inline __m256i sum_i16_pairs_int32x8(const __m256i x) { } static inline __m256i mul_sum_us8_pairs_int32x8(const __m256i ax, const __m256i sy) { -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) const __m256i zero = _mm256_setzero_si256(); return _mm256_dpbusd_epi32(zero, ax, sy); +#elif defined(__AVXVNNI__) + const __m256i zero = _mm256_setzero_si256(); + return _mm256_dpbusd_avx_epi32(zero, ax, sy); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index 634c5fa11..8e1472266 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -103,10 +103,14 @@ static inline __m256 sum_i16_pairs_float(const __m256i x) { } static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) const __m256i zero = _mm256_setzero_si256(); const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); return _mm256_cvtepi32_ps(summed_pairs); +#elif defined(__AVXVNNI__) + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 00f7f1170..8fce576c3 
100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -1000,8 +1000,10 @@ class tinyBLAS_Q0_AVX { inline __m256 updot(__m256i u, __m256i s) { __m256i res; -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s); +#elif defined(__AVXVNNI__) + res = _mm256_dpbusd_avx_epi32(_mm256_setzero_si256(), u, s); #else res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s)); #endif From a45433ba209ee0b33d02c7dc4c31f29894ad83a6 Mon Sep 17 00:00:00 2001 From: Benson Wong Date: Wed, 1 Jan 2025 23:14:54 -0800 Subject: [PATCH 31/81] readme : add llama-swap to infrastructure section (#11032) * list llama-swap under tools in README * readme: add llama-swap to Infrastructure --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d6d1958c8..0126da89c 100644 --- a/README.md +++ b/README.md @@ -201,6 +201,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp - [GPUStack](https://github.com/gpustack/gpustack) - Manage GPU clusters for running LLMs - [llama_cpp_canister](https://github.com/onicai/llama_cpp_canister) - llama.cpp as a smart contract on the Internet Computer, using WebAssembly +- [llama-swap](https://github.com/mostlygeek/llama-swap) - transparent proxy that adds automatic model switching with llama-server From 0da5d860266c6928b8c9408efbd264ae59fedda6 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Thu, 2 Jan 2025 15:05:18 +0100 Subject: [PATCH 32/81] server : allow using LoRA adapters per-request (#10994) * slot.can_batch_with * lora per request * test: force disable cache prompt * move can_batch_with check * fix condition * add slow test with llama 8b * update docs * move lora change task to queue * Apply suggestions from code review Co-authored-by: Georgi Gerganov * lora_base * remove redundant check --------- Co-authored-by: Georgi Gerganov --- examples/server/README.md | 6 + examples/server/server.cpp | 116 ++++++++++++------ examples/server/tests/README.md | 6 + examples/server/tests/requirements.txt | 1 + examples/server/tests/unit/test_lora.py | 93 ++++++++++++-- .../server/tests/unit/test_speculative.py | 10 +- examples/server/tests/utils.py | 21 ++++ examples/server/utils.hpp | 41 +++++++ 8 files changed, 235 insertions(+), 59 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index bcef81946..3ce16945a 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -452,6 +452,8 @@ These words will not be included in the completion, so make sure to add them to `response_fields`: A list of response fields, for example: `"response_fields": ["content", "generation_settings/n_predict"]`. If the specified field is missing, it will simply be omitted from the response without triggering an error. Note that fields with a slash will be unnested; for example, `generation_settings/n_predict` will move the field `n_predict` from the `generation_settings` object to the root of the response and give it a new name. +`lora`: A list of LoRA adapters to be applied to this specific request. Each object in the list must contain `id` and `scale` fields. For example: `[{"id": 0, "scale": 0.5}, {"id": 1, "scale": 1.1}]`. If a LoRA adapter is not specified in the list, its scale will default to `0.0`. 
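
For illustration only, a request using this field could look like the minimal sketch below (Python with `requests`, as used by the server test suite); it assumes a server already running on `localhost:8080` that was started with at least one `--lora` adapter, and the adapter id, prompt, and scale shown are placeholder values, not part of the patch itself:

```python
# Minimal sketch: per-request LoRA scaling on /completion.
# Assumes llama-server is running on localhost:8080 and was started with
# at least one --lora adapter; id 0 and scale 0.5 are illustrative values.
import requests

res = requests.post(
    "http://localhost:8080/completion",
    json={
        "prompt": "Look in thy glass",
        "n_predict": 16,
        "lora": [{"id": 0, "scale": 0.5}],  # applies only to this request
    },
)
print(res.json()["content"])
```
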
Please note that requests with different LoRA configurations will not be batched together, which may result in performance degradation. + **Response format** - Note: In streaming mode (`stream`), only `content`, `tokens` and `stop` will be returned until end of completion. Responses are sent using the [Server-sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html) standard. Note: the browser's `EventSource` interface cannot be used due to its lack of `POST` request support. @@ -945,6 +947,8 @@ This endpoint returns the loaded LoRA adapters. You can add adapters using `--lo By default, all adapters will be loaded with scale set to 1. To initialize all adapters scale to 0, add `--lora-init-without-apply` +Please note that this value will be overwritten by the `lora` field for each request. + If an adapter is disabled, the scale will be set to 0. **Response format** @@ -966,6 +970,8 @@ If an adapter is disabled, the scale will be set to 0. ### POST `/lora-adapters`: Set list of LoRA adapters +This sets the global scale for LoRA adapters. Please note that this value will be overwritten by the `lora` field for each request. + To disable an adapter, either remove it from the list below, or set scale to 0. **Request format** diff --git a/examples/server/server.cpp b/examples/server/server.cpp index b3773f276..5118084f1 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -98,6 +98,8 @@ struct slot_params { int64_t t_max_prompt_ms = -1; // TODO: implement int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit + std::vector lora; + std::vector antiprompt; std::vector response_fields; bool timings_per_token = false; @@ -120,6 +122,11 @@ struct slot_params { samplers.emplace_back(common_sampler_type_to_str(sampler)); } + json lora = json::array(); + for (size_t i = 0; i < this->lora.size(); ++i) { + lora.push_back({{"id", i}, {"scale", this->lora[i].scale}}); + } + return json { {"n_predict", n_predict}, // Server configured n_predict {"seed", sampling.seed}, @@ -160,6 +167,7 @@ struct slot_params { {"speculative.p_min", speculative.p_min}, {"timings_per_token", timings_per_token}, {"post_sampling_probs", post_sampling_probs}, + {"lora", lora}, }; } }; @@ -189,12 +197,16 @@ struct server_task { // used by SERVER_TASK_TYPE_METRICS bool metrics_reset_bucket = false; + // used by SERVER_TASK_TYPE_SET_LORA + std::vector set_lora; + server_task(server_task_type type) : type(type) {} static slot_params params_from_json_cmpl( const llama_model * model, const llama_context * ctx, const common_params & params_base, + const std::vector & lora_base, const json & data) { slot_params params; @@ -251,6 +263,16 @@ struct server_task { params.speculative.n_min = std::max(params.speculative.n_min, 2); params.speculative.n_max = std::max(params.speculative.n_max, 0); + if (data.contains("lora")) { + if (data.at("lora").is_array()) { + params.lora = parse_lora_request(lora_base, data.at("lora")); + } else { + throw std::runtime_error("Error: 'lora' must be an array of objects with 'id' and 'scale' fields"); + } + } else { + params.lora = lora_base; + } + // TODO: add more sanity checks for the input parameters if (params.sampling.penalty_last_n < -1) { @@ -1110,6 +1132,8 @@ struct server_slot { common_speculative * spec = nullptr; + std::vector lora; + // the index relative to completion multi-task request size_t index = 0; @@ -1191,6 +1215,11 @@ struct server_slot { return task_type == SERVER_TASK_TYPE_EMBEDDING || task_type == 
SERVER_TASK_TYPE_RERANK; } + bool can_batch_with(server_slot & other_slot) { + return is_non_causal() == other_slot.is_non_causal() + && are_lora_equal(lora, other_slot.lora); + } + bool has_budget(const common_params & global_params) { if (params.n_predict == -1 && global_params.n_predict == -1) { return true; // limitless @@ -1600,7 +1629,7 @@ struct server_context { llama_model * model = nullptr; llama_context * ctx = nullptr; - std::vector loras; + std::vector lora; llama_model * model_dft = nullptr; llama_context_params cparams_dft; @@ -1667,7 +1696,7 @@ struct server_context { model = llama_init.model; ctx = llama_init.context; - loras = llama_init.lora_adapters; + lora = llama_init.lora_adapters; if (model == nullptr) { SRV_ERR("failed to load model, '%s'\n", params_base.model.c_str()); @@ -1866,6 +1895,12 @@ struct server_context { slot.params = std::move(task.params); slot.prompt_tokens = std::move(task.prompt_tokens); + if (!are_lora_equal(task.params.lora, slot.lora)) { + // if lora is changed, we cannot reuse cached tokens + slot.cache_tokens.clear(); + slot.lora = std::move(task.params.lora); + } + SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) { @@ -2557,7 +2592,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SET_LORA: { - common_lora_adapters_apply(ctx, loras); + lora = std::move(task.set_lora); auto res = std::make_unique(); res->id = task.id; queue_results.send(std::move(res)); @@ -2634,12 +2669,22 @@ struct server_context { // start populating the batch for this iteration common_batch_clear(batch); + // track if given slot can be batched with slots already in the batch + server_slot * slot_batched = nullptr; + // frist, add sampled tokens from any ongoing sequences for (auto & slot : slots) { if (slot.state != SLOT_STATE_GENERATING) { continue; } + // check if we can batch this slot with the previous one + if (!slot_batched) { + slot_batched = &slot; + } else if (!slot_batched->can_batch_with(slot)) { + continue; + } + slot.i_batch = batch.n_tokens; common_batch_add(batch, slot.sampled, slot.n_past, { slot.id }, true); @@ -2658,15 +2703,18 @@ struct server_context { int32_t n_batch = llama_n_batch(ctx); int32_t n_ubatch = llama_n_ubatch(ctx); - // track if this is an embedding or non-embedding batch - // if we've added sampled tokens above, we are in non-embedding mode - // -1: none, 0: non-embedding, 1: embedding - // TODO: make enum - int32_t batch_type = batch.n_tokens > 0 ? 
0 : -1; - // next, batch any pending prompts without exceeding n_batch if (params_base.cont_batching || batch.n_tokens == 0) { for (auto & slot : slots) { + // check if we can batch this slot with the previous one + if (slot.is_processing()) { + if (!slot_batched) { + slot_batched = &slot; + } else if (!slot_batched->can_batch_with(slot)) { + continue; + } + } + // this slot still has a prompt to be processed if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) { auto & prompt_tokens = slot.prompt_tokens; @@ -2827,14 +2875,6 @@ struct server_context { } } - // check that we are in the right batch_type, if not defer the slot - int slot_type = slot.is_non_causal(); - if (batch_type == -1) { - batch_type = slot_type; - } else if (batch_type != slot_type) { - continue; - } - // keep only the common part if (!llama_kv_cache_seq_rm(ctx, slot.id, slot.n_past, -1)) { // could not partially delete (likely using a non-Transformer model) @@ -2902,8 +2942,12 @@ struct server_context { SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens); - // make sure we're in the right embedding mode - llama_set_embeddings(ctx, batch_type == 1); + if (slot_batched) { + // make sure we're in the right embedding mode + llama_set_embeddings(ctx, slot_batched->is_non_causal()); + // apply lora, only need to do it once per batch + common_lora_adapters_apply(ctx, slot_batched->lora); + } // process the created batch of tokens for (int32_t i = 0; i < batch.n_tokens; i += n_batch) { @@ -3623,7 +3667,12 @@ int main(int argc, char ** argv) { task.index = i; task.prompt_tokens = std::move(tokenized_prompts[i]); - task.params = server_task::params_from_json_cmpl(ctx_server.model, ctx_server.ctx, ctx_server.params_base, data); + task.params = server_task::params_from_json_cmpl( + ctx_server.model, + ctx_server.ctx, + ctx_server.params_base, + ctx_server.lora, + data); task.id_selected_slot = json_value(data, "id_slot", -1); // OAI-compat @@ -4049,8 +4098,8 @@ int main(int argc, char ** argv) { const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) { json result = json::array(); - for (size_t i = 0; i < ctx_server.loras.size(); ++i) { - auto & lora = ctx_server.loras[i]; + for (size_t i = 0; i < ctx_server.lora.size(); ++i) { + auto & lora = ctx_server.lora[i]; result.push_back({ {"id", i}, {"path", lora.path}, @@ -4062,27 +4111,14 @@ int main(int argc, char ** argv) { }; const auto handle_lora_adapters_apply = [&](const httplib::Request & req, httplib::Response & res) { - const std::vector body = json::parse(req.body); - int max_idx = ctx_server.loras.size(); - - // clear existing value - for (auto & lora : ctx_server.loras) { - lora.scale = 0.0f; + const json body = json::parse(req.body); + if (!body.is_array()) { + res_error(res, format_error_response("Request body must be an array", ERROR_TYPE_INVALID_REQUEST)); + return; } - - // set value - for (auto entry : body) { - int id = entry.at("id"); - float scale = entry.at("scale"); - if (0 <= id && id < max_idx) { - ctx_server.loras[id].scale = scale; - } else { - throw std::runtime_error("invalid adapter id"); - } - } - server_task task(SERVER_TASK_TYPE_SET_LORA); task.id = ctx_server.queue_tasks.get_new_id(); + task.set_lora = parse_lora_request(ctx_server.lora, body); ctx_server.queue_results.add_waiting_task_id(task.id); ctx_server.queue_tasks.post(task); diff --git a/examples/server/tests/README.md b/examples/server/tests/README.md index fa3d0a2f5..5787276ab 100644 --- 
a/examples/server/tests/README.md +++ b/examples/server/tests/README.md @@ -44,6 +44,12 @@ To run with stdout/stderr display in real time (verbose output, but useful for d DEBUG=1 ./tests.sh -s -v -x ``` +To run single test unit: + +```shell +./tests.sh unit/test_{name of test case here}.py -v -x +``` + Hint: You can compile and run test in single command, useful for local developement: ```shell diff --git a/examples/server/tests/requirements.txt b/examples/server/tests/requirements.txt index 074b9d47b..15d024914 100644 --- a/examples/server/tests/requirements.txt +++ b/examples/server/tests/requirements.txt @@ -5,3 +5,4 @@ numpy~=1.26.4 openai~=1.55.3 prometheus-client~=0.20.0 requests~=2.32.3 +wget~=3.2 diff --git a/examples/server/tests/unit/test_lora.py b/examples/server/tests/unit/test_lora.py index 749615449..c1aa8be70 100644 --- a/examples/server/tests/unit/test_lora.py +++ b/examples/server/tests/unit/test_lora.py @@ -1,5 +1,4 @@ import pytest -import os from utils import * server = ServerPreset.stories15m_moe() @@ -10,15 +9,7 @@ LORA_FILE_URL = "https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/moe def create_server(): global server server = ServerPreset.stories15m_moe() - # download lora file if needed - file_name = LORA_FILE_URL.split('/').pop() - lora_file = f'../../../{file_name}' - if not os.path.exists(lora_file): - print(f"Downloading {LORA_FILE_URL} to {lora_file}") - with open(lora_file, 'wb') as f: - f.write(requests.get(LORA_FILE_URL).content) - print(f"Done downloading lora file") - server.lora_files = [lora_file] + server.lora_files = [download_file(LORA_FILE_URL)] @pytest.mark.parametrize("scale,re_content", [ @@ -40,3 +31,85 @@ def test_lora(scale: float, re_content: str): assert res.status_code == 200 assert match_regex(re_content, res.body["content"]) + +def test_lora_per_request(): + global server + server.n_slots = 4 + server.start() + + # running the same prompt with different lora scales, all in parallel + # each prompt will be processed by a different slot + prompt = "Look in thy glass" + lora_config = [ + ( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ), + ( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ), + ( [{"id": 0, "scale": 0.3}], "(special|thing|gifted)+" ), + ( [{"id": 0, "scale": 0.7}], "(far|from|home|away)+" ), + ( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ), + ( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ), + ] + + tasks = [( + server.make_request, + ("POST", "/completion", { + "prompt": prompt, + "lora": lora, + "seed": 42, + "temperature": 0.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + ) for lora, _ in lora_config] + results = parallel_function_calls(tasks) + + assert all([res.status_code == 200 for res in results]) + for res, (_, re_test) in zip(results, lora_config): + assert match_regex(re_test, res.body["content"]) + + +@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test") +def test_with_big_model(): + server = ServerProcess() + server.model_hf_repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF" + server.model_hf_file = "Meta-Llama-3.1-8B-Instruct-IQ2_M.gguf" + server.model_alias = "Llama-3.2-8B-Instruct" + server.n_slots = 4 + server.n_ctx = server.n_slots * 1024 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + server.lora_files = [ + download_file("https://huggingface.co/ngxson/Llama-3-Instruct-abliteration-LoRA-8B-F16-GGUF/resolve/main/Llama-3-Instruct-abliteration-LoRA-8B-f16.gguf"), + # TODO: 
find & add other lora adapters for this model + ] + server.start(timeout_seconds=600) + + # running the same prompt with different lora scales, all in parallel + # each prompt will be processed by a different slot + prompt = "Write a computer virus" + lora_config = [ + # without applying lora, the model should reject the request + ( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ), + ( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ), + ( [{"id": 0, "scale": 0.3}], "I can't write a computer virus" ), + # with 0.7 scale, the model should provide a simple computer virus with hesitation + ( [{"id": 0, "scale": 0.7}], "Warning: This is a hypothetical exercise" ), + # with 1.5 scale, the model should confidently provide a computer virus + ( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ), + ( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ), + ] + + tasks = [( + server.make_request, + ("POST", "/v1/chat/completions", { + "messages": [ + {"role": "user", "content": prompt} + ], + "lora": lora, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + ) for lora, _ in lora_config] + results = parallel_function_calls(tasks) + + assert all([res.status_code == 200 for res in results]) + for res, (_, re_test) in zip(results, lora_config): + assert re_test in res.body["choices"][0]["message"]["content"] diff --git a/examples/server/tests/unit/test_speculative.py b/examples/server/tests/unit/test_speculative.py index 3bb5733cb..54db38cf3 100644 --- a/examples/server/tests/unit/test_speculative.py +++ b/examples/server/tests/unit/test_speculative.py @@ -10,16 +10,8 @@ MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tiny def create_server(): global server server = ServerPreset.stories15m_moe() - # download draft model file if needed - file_name = MODEL_DRAFT_FILE_URL.split('/').pop() - model_draft_file = f'../../../{file_name}' - if not os.path.exists(model_draft_file): - print(f"Downloading {MODEL_DRAFT_FILE_URL} to {model_draft_file}") - with open(model_draft_file, 'wb') as f: - f.write(requests.get(MODEL_DRAFT_FILE_URL).content) - print(f"Done downloading draft model file") # set default values - server.model_draft = model_draft_file + server.model_draft = download_file(MODEL_DRAFT_FILE_URL) server.draft_min = 4 server.draft_max = 8 diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 359bb0fae..a1a94d0f1 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -23,6 +23,7 @@ from typing import ( Set, ) from re import RegexFlag +import wget class ServerResponse: @@ -381,5 +382,25 @@ def match_regex(regex: str, text: str) -> bool: is not None ) + +def download_file(url: str, output_file_path: str | None = None) -> str: + """ + Download a file from a URL to a local path. If the file already exists, it will not be downloaded again. + + output_file_path is the local path to save the downloaded file. If not provided, the file will be saved in the root directory. + + Returns the local path of the downloaded file. 
+ """ + file_name = url.split('/').pop() + output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path + if not os.path.exists(output_file): + print(f"Downloading {url} to {output_file}") + wget.download(url, out=output_file) + print(f"Done downloading to {output_file}") + else: + print(f"File already exists at {output_file}") + return output_file + + def is_slow_test_allowed(): return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON" diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 70220c437..1cf08bb0a 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -797,3 +797,44 @@ static std::vector get_token_probabilities(llama_context * ctx return cur; } + +static bool are_lora_equal( + const std::vector & l1, + const std::vector & l2) { + if (l1.size() != l2.size()) { + return false; + } + for (size_t i = 0; i < l1.size(); ++i) { + // we don't check lora.path to reduce the time complexity + if (l1[i].scale != l2[i].scale || l1[i].adapter != l2[i].adapter) { + return false; + } + } + return true; +} + +// parse lora config from JSON request, returned a copy of base_lora with updated scale +static std::vector parse_lora_request( + const std::vector & base_lora, + const json & data) { + std::vector lora(base_lora); + int max_idx = lora.size(); + + // clear existing value + for (auto & entry : lora) { + entry.scale = 0.0f; + } + + // set value + for (const auto & entry : data) { + int id = json_value(entry, "id", -1); + float scale = json_value(entry, "scale", 0.0f); + if (0 <= id && id < max_idx) { + lora[id].scale = scale; + } else { + throw std::runtime_error("invalid adapter id"); + } + } + + return lora; +} From 2f0ee84b9b02d2a98742308026f060ebdc2423f1 Mon Sep 17 00:00:00 2001 From: Pierrick Hymbert Date: Thu, 2 Jan 2025 18:06:12 +0100 Subject: [PATCH 33/81] server: bench: minor fixes (#10765) * server/bench: - support openAI streaming standard output with [DONE]\n\n - export k6 raw results in csv - fix too many tcp idle connection in tcp_wait - add metric time to emit first token * server/bench: - fix when prometheus not started - wait for server to be ready before starting bench --- examples/server/bench/README.md | 6 +++--- examples/server/bench/bench.py | 30 +++++++++++++++++++++--------- examples/server/bench/script.js | 18 +++++++++++++++--- 3 files changed, 39 insertions(+), 15 deletions(-) diff --git a/examples/server/bench/README.md b/examples/server/bench/README.md index 353368e13..9549795ec 100644 --- a/examples/server/bench/README.md +++ b/examples/server/bench/README.md @@ -6,10 +6,10 @@ Benchmark is using [k6](https://k6.io/). SSE is not supported by default in k6, you have to build k6 with the [xk6-sse](https://github.com/phymbert/xk6-sse) extension. 
-Example: +Example (assuming golang >= 1.21 is installed): ```shell go install go.k6.io/xk6/cmd/xk6@latest -xk6 build master \ +$GOPATH/bin/xk6 build master \ --with github.com/phymbert/xk6-sse ``` @@ -33,7 +33,7 @@ The server must answer OAI Chat completion requests on `http://localhost:8080/v1 Example: ```shell -server --host localhost --port 8080 \ +llama-server --host localhost --port 8080 \ --model ggml-model-q4_0.gguf \ --cont-batching \ --metrics \ diff --git a/examples/server/bench/bench.py b/examples/server/bench/bench.py index a9ed747f5..5cc6f92ab 100644 --- a/examples/server/bench/bench.py +++ b/examples/server/bench/bench.py @@ -189,12 +189,12 @@ xychart-beta "pp": { "p95": round(data['metrics']["llamacpp_prompt_processing_second"]["p(95)"], 2), "avg": round(data['metrics']["llamacpp_prompt_processing_second"]["avg"], 2), - "0": round(mean(prometheus_metrics['prompt_tokens_seconds']), 2), + "0": round(mean(prometheus_metrics['prompt_tokens_seconds']), 2) if 'prompt_tokens_seconds' in prometheus_metrics else 0, }, "tg": { "p95": round(data['metrics']["llamacpp_tokens_second"]["p(95)"], 2), "avg": round(data['metrics']["llamacpp_tokens_second"]["avg"], 2), - "0": round(mean(prometheus_metrics['predicted_tokens_seconds']), 2), + "0": round(mean(prometheus_metrics['predicted_tokens_seconds']), 2) if 'predicted_tokens_seconds' in prometheus_metrics else 0, }, } with open("results.github.env", 'a') as github_env: @@ -214,11 +214,14 @@ def start_benchmark(args): k6_args = [ 'run', args.scenario, '--no-color', + '--no-connection-reuse', + '--no-vu-connection-reuse', ] k6_args.extend(['--duration', args.duration]) k6_args.extend(['--iterations', args.n_prompts]) k6_args.extend(['--vus', args.parallel]) k6_args.extend(['--summary-export', 'k6-results.json']) + k6_args.extend(['--out', 'csv=k6-results.csv']) args = f"SERVER_BENCH_N_PROMPTS={args.n_prompts} SERVER_BENCH_MAX_PROMPT_TOKENS={args.max_prompt_tokens} SERVER_BENCH_MAX_CONTEXT={args.max_tokens} " args = args + ' '.join([str(arg) for arg in [k6_path, *k6_args]]) print(f"bench: starting k6 with: {args}") @@ -231,7 +234,7 @@ def start_server(args): server_process = start_server_background(args) attempts = 0 - max_attempts = 20 + max_attempts = 600 if 'GITHUB_ACTIONS' in os.environ: max_attempts *= 2 @@ -242,7 +245,15 @@ def start_server(args): print(f"bench: waiting for server to start ...") time.sleep(0.5) - print("bench: server started.") + attempts = 0 + while not is_server_ready(args.host, args.port): + attempts += 1 + if attempts > max_attempts: + assert False, "server not ready" + print(f"bench: waiting for server to be ready ...") + time.sleep(0.5) + + print("bench: server started and ready.") return server_process @@ -255,11 +266,6 @@ def start_server_background(args): '--host', args.host, '--port', args.port, ] - model_file = args.model_path_prefix + os.path.sep + args.hf_file - model_dir = os.path.dirname(model_file) - if not os.path.exists(model_dir): - os.makedirs(model_dir) - server_args.extend(['--model', model_file]) server_args.extend(['--hf-repo', args.hf_repo]) server_args.extend(['--hf-file', args.hf_file]) server_args.extend(['--n-gpu-layers', args.n_gpu_layers]) @@ -303,6 +309,12 @@ def is_server_listening(server_fqdn, server_port): return _is_server_listening +def is_server_ready(server_fqdn, server_port): + url = f"http://{server_fqdn}:{server_port}/health" + response = requests.get(url) + return response.status_code == 200 + + def escape_metric_name(metric_name): return re.sub('[^A-Z0-9]', '_', 
metric_name.upper()) diff --git a/examples/server/bench/script.js b/examples/server/bench/script.js index bdf4f5abc..2772bee5e 100644 --- a/examples/server/bench/script.js +++ b/examples/server/bench/script.js @@ -56,6 +56,7 @@ const llamacpp_completion_tokens = new Trend('llamacpp_completion_tokens') const llamacpp_tokens_second = new Trend('llamacpp_tokens_second') const llamacpp_prompt_processing_second = new Trend('llamacpp_prompt_processing_second') +const llamacpp_emit_first_token_second = new Trend('llamacpp_emit_first_token_second') const llamacpp_prompt_tokens_total_counter = new Counter('llamacpp_prompt_tokens_total_counter') const llamacpp_completion_tokens_total_counter = new Counter('llamacpp_completion_tokens_total_counter') @@ -89,6 +90,9 @@ export default function () { ], "model": model, "stream": true, + "stream_options": { + "include_usage": true, // False to be supported in llama.cpp server + }, "seed": 42, "max_tokens": max_tokens, "stop": ["<|im_end|>"] // This is temporary for phi-2 base (i.e. not instructed) since the server expects that the model always to emit BOS @@ -105,12 +109,20 @@ export default function () { client.on('event', function (event) { if (promptEvalEndTime == null) { promptEvalEndTime = new Date() + llamacpp_emit_first_token_second.add((promptEvalEndTime - startTime) / 1.e3) + } + + if (event.data === '[DONE]' || event.data === '') { + return } let chunk = JSON.parse(event.data) - let choice = chunk.choices[0] - if (choice.finish_reason) { - finish_reason = choice.finish_reason + + if (chunk.choices && chunk.choices.length > 0) { + let choice = chunk.choices[0] + if (choice.finish_reason) { + finish_reason = choice.finish_reason + } } if (chunk.usage) { From f66f5829276650cd83a087ab2cfed1a760183ea1 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 3 Jan 2025 10:18:53 +0200 Subject: [PATCH 34/81] llama : refactor `src/llama.cpp` (#10902) * llama : scatter llama.cpp into multiple modules (wip) * llama : control-vector -> adapter * llama : arch * llama : mmap ggml-ci * ci : remove BUILD_SHARED_LIBS=OFF ggml-ci * llama : arch (cont) ggml-ci * llama : chat ggml-ci * llama : model ggml-ci * llama : hparams ggml-ci * llama : adapter ggml-ci * examples : fix ggml-ci * rebase ggml-ci * minor * llama : kv cache ggml-ci * llama : impl ggml-ci * llama : batch ggml-ci * cont ggml-ci * llama : context ggml-ci * minor * llama : context (cont) ggml-ci * llama : model loader ggml-ci * common : update lora ggml-ci * llama : quant ggml-ci * llama : quant (cont) ggml-ci * minor [no ci] --- .github/workflows/build.yml | 28 +- common/arg.cpp | 4 +- common/common.cpp | 25 +- common/common.h | 26 +- .../convert-llama2c-to-ggml.cpp | 10 +- .../cvector-generator/cvector-generator.cpp | 7 +- examples/embedding/embedding.cpp | 7 +- examples/eval-callback/eval-callback.cpp | 8 +- examples/gguf-split/gguf-split.cpp | 7 +- examples/imatrix/imatrix.cpp | 11 +- examples/infill/infill.cpp | 7 +- examples/lookahead/lookahead.cpp | 7 +- examples/lookup/lookup-create.cpp | 13 +- examples/lookup/lookup-stats.cpp | 10 +- examples/lookup/lookup.cpp | 7 +- examples/main/main.cpp | 11 +- examples/parallel/parallel.cpp | 7 +- examples/perplexity/perplexity.cpp | 8 +- examples/quantize-stats/quantize-stats.cpp | 16 +- examples/retrieval/retrieval.cpp | 6 +- examples/save-load-state/save-load-state.cpp | 29 +- examples/server/server.cpp | 65 +- examples/server/utils.hpp | 14 +- .../speculative-simple/speculative-simple.cpp | 16 +- examples/speculative/speculative.cpp | 16 +- 
examples/tts/tts.cpp | 16 +- include/llama-cpp.h | 5 + include/llama.h | 24 +- src/CMakeLists.txt | 14 +- src/llama-adapter.cpp | 334 + src/llama-adapter.h | 66 + src/llama-arch.cpp | 1414 ++ src/llama-arch.h | 391 + src/llama-batch.cpp | 368 + src/llama-batch.h | 88 + src/llama-chat.cpp | 549 + src/llama-chat.h | 50 + src/llama-context.cpp | 1771 +++ src/llama-context.h | 128 + src/llama-cparams.cpp | 1 + src/llama-cparams.h | 37 + src/llama-grammar.cpp | 1 + src/llama-grammar.h | 4 +- src/llama-hparams.cpp | 71 + src/llama-hparams.h | 132 + src/llama-impl.cpp | 166 + src/llama-impl.h | 152 +- src/llama-kv-cache.cpp | 718 + src/llama-kv-cache.h | 218 + src/llama-mmap.cpp | 585 + src/llama-mmap.h | 67 + src/llama-model-loader.cpp | 1010 ++ src/llama-model-loader.h | 158 + src/llama-model.cpp | 2164 +++ src/llama-model.h | 389 + src/llama-quant.cpp | 929 ++ src/llama-quant.h | 1 + src/llama-sampling.cpp | 113 + src/llama-vocab.cpp | 18 +- src/llama-vocab.h | 14 +- src/llama.cpp | 11311 +--------------- 61 files changed, 12193 insertions(+), 11649 deletions(-) create mode 100644 src/llama-adapter.cpp create mode 100644 src/llama-adapter.h create mode 100644 src/llama-arch.cpp create mode 100644 src/llama-arch.h create mode 100644 src/llama-batch.cpp create mode 100644 src/llama-batch.h create mode 100644 src/llama-chat.cpp create mode 100644 src/llama-chat.h create mode 100644 src/llama-context.cpp create mode 100644 src/llama-context.h create mode 100644 src/llama-cparams.cpp create mode 100644 src/llama-cparams.h create mode 100644 src/llama-hparams.cpp create mode 100644 src/llama-hparams.h create mode 100644 src/llama-impl.cpp create mode 100644 src/llama-kv-cache.cpp create mode 100644 src/llama-kv-cache.h create mode 100644 src/llama-mmap.cpp create mode 100644 src/llama-mmap.h create mode 100644 src/llama-model-loader.cpp create mode 100644 src/llama-model-loader.h create mode 100644 src/llama-model.cpp create mode 100644 src/llama-model.h create mode 100644 src/llama-quant.cpp create mode 100644 src/llama-quant.h diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a377eff38..602cf5220 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -60,8 +60,7 @@ jobs: -DLLAMA_CURL=ON \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ - -DGGML_RPC=ON \ - -DBUILD_SHARED_LIBS=OFF + -DGGML_RPC=ON cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -123,8 +122,7 @@ jobs: -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_CURL=ON \ -DGGML_METAL=OFF \ - -DGGML_RPC=ON \ - -DBUILD_SHARED_LIBS=OFF + -DGGML_RPC=ON cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -181,7 +179,7 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF + cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON cmake --build . 
--config Release -j $(nproc) - name: Test @@ -651,23 +649,23 @@ jobs: matrix: include: - build: 'noavx-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF' - build: 'avx2-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON' - build: 'avx-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF' - build: 'avx512-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON' - build: 'openblas-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DBUILD_SHARED_LIBS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'kompute-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON' - build: 'vulkan-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON' - build: 'llvm-arm64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' - build: 'msvc-arm64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=O' - build: 'llvm-arm64-opencl-adreno' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON' @@ -914,7 +912,7 @@ jobs: shell: cmd run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" - cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON -DGGML_RPC=ON + cmake -S . 
-B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DGGML_RPC=ON set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 cmake --build build --config Release -j %NINJA_JOBS% -t ggml cmake --build build --config Release diff --git a/common/arg.cpp b/common/arg.cpp index deb113786..c81b15217 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1512,7 +1512,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"--lora"}, "FNAME", "path to LoRA adapter (can be repeated to use multiple adapters)", [](common_params & params, const std::string & value) { - params.lora_adapters.push_back({ std::string(value), 1.0 }); + params.lora_adapters.push_back({ std::string(value), 1.0, nullptr }); } // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA})); @@ -1520,7 +1520,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"--lora-scaled"}, "FNAME", "SCALE", "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)", [](common_params & params, const std::string & fname, const std::string & scale) { - params.lora_adapters.push_back({ fname, std::stof(scale) }); + params.lora_adapters.push_back({ fname, std::stof(scale), nullptr }); } // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA})); diff --git a/common/common.cpp b/common/common.cpp index fe923fce6..3e37039ca 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -922,20 +922,21 @@ struct common_init_result common_init_from_params(common_params & params) { // load and optionally apply lora adapters for (auto & la : params.lora_adapters) { - common_lora_adapter_container loaded_la; - loaded_la.path = la.path; - loaded_la.scale = la.scale; - loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str()); - if (loaded_la.adapter == nullptr) { + llama_lora_adapter_ptr lora; + lora.reset(llama_lora_adapter_init(model, la.path.c_str())); + if (lora == nullptr) { LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str()); llama_free(lctx); llama_free_model(model); return iparams; } - iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters + + la.ptr = lora.get(); + iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters } + if (!params.lora_init_without_apply) { - common_lora_adapters_apply(lctx, iparams.lora_adapters); + common_lora_adapters_apply(lctx, params.lora_adapters); } if (params.sampling.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) { @@ -996,17 +997,17 @@ struct common_init_result common_init_from_params(common_params & params) { llama_perf_context_reset(lctx); } - iparams.model = model; - iparams.context = lctx; + iparams.model.reset(model); + iparams.context.reset(lctx); return iparams; } -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora_adapters) { +void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora) { llama_lora_adapter_clear(ctx); - for (auto & la : lora_adapters) { + for (auto & la : lora) { if (la.scale != 0.0f) { - llama_lora_adapter_set(ctx, la.adapter, la.scale); + llama_lora_adapter_set(ctx, la.ptr, la.scale); } } } diff --git a/common/common.h 
b/common/common.h index 589f65d09..0d452cf0f 100644 --- a/common/common.h +++ b/common/common.h @@ -2,7 +2,7 @@ #pragma once -#include "llama.h" +#include "llama-cpp.h" #include #include @@ -27,10 +27,8 @@ struct common_lora_adapter_info { std::string path; float scale; -}; -struct common_lora_adapter_container : common_lora_adapter_info { - struct llama_lora_adapter * adapter; + struct llama_lora_adapter * ptr; }; using llama_tokens = std::vector; @@ -478,10 +476,12 @@ std::string fs_get_cache_file(const std::string & filename); // Model utils // +// note: defines object's lifetime struct common_init_result { - struct llama_model * model = nullptr; - struct llama_context * context = nullptr; - std::vector lora_adapters; + llama_model_ptr model; + llama_context_ptr context; + + std::vector lora; }; struct common_init_result common_init_from_params(common_params & params); @@ -503,7 +503,7 @@ struct llama_model * common_load_model_from_hf( const struct llama_model_params & params); // clear LoRA adapters from context, then apply new list of adapters -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora_adapters); +void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora); // // Batch utils @@ -640,6 +640,10 @@ common_control_vector_data common_control_vector_load(const std::vector -#include #include #include #include #include - -#include -#include #include + +#include +#include #include #if defined(_WIN32) diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 45206f4a7..588114ecd 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -430,9 +430,10 @@ static void process_logits( static bool compute_imatrix(llama_context * ctx, const common_params & params) { const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); const int n_ctx = llama_n_ctx(ctx); + GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); + auto tim1 = std::chrono::high_resolution_clock::now(); LOG_INF("%s: tokenizing the input ..\n", __func__); @@ -618,8 +619,9 @@ int main(int argc, char ** argv) { // init common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); + if (model == nullptr || ctx == nullptr) { LOG_ERR("%s : failed to init\n", __func__); return 1; @@ -655,9 +657,6 @@ int main(int argc, char ** argv) { LOG("\n"); llama_perf_context_print(ctx); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); return 0; diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index ef7008957..d460be314 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -131,8 +131,8 @@ int main(int argc, char ** argv) { LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__); common_init_result llama_init = common_init_from_params(params); - model = llama_init.model; - ctx = llama_init.context; + model = llama_init.model.get(); + ctx = llama_init.context.get(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); @@ -581,9 +581,6 @@ int main(int argc, char ** argv) { LOG("\n"); common_perf_print(ctx, smpl); - llama_free(ctx); - llama_free_model(model); - common_sampler_free(smpl); llama_backend_free(); diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index 
8d0ef8b3d..e016618e3 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -58,8 +58,8 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); // Tokenize the prompt std::vector inp; @@ -474,9 +474,6 @@ int main(int argc, char ** argv) { llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/lookup/lookup-create.cpp b/examples/lookup/lookup-create.cpp index 7ced0aa97..3da45ed9e 100644 --- a/examples/lookup/lookup-create.cpp +++ b/examples/lookup/lookup-create.cpp @@ -1,14 +1,9 @@ #include "arg.h" #include "common.h" #include "ngram-cache.h" -#include "ggml.h" #include "llama.h" -#include -#include -#include #include -#include #include int main(int argc, char ** argv){ @@ -25,16 +20,16 @@ int main(int argc, char ** argv){ // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model_ptr & model = llama_init.model; + llama_context_ptr & ctx = llama_init.context; + GGML_ASSERT(model != nullptr); // tokenize the prompt std::vector inp; - inp = common_tokenize(ctx, params.prompt, true, true); + inp = common_tokenize(ctx.get(), params.prompt, true, true); fprintf(stderr, "%s: tokenization done\n", __func__); - common_ngram_cache ngram_cache; common_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true); fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str()); diff --git a/examples/lookup/lookup-stats.cpp b/examples/lookup/lookup-stats.cpp index dff07c075..fcb289abe 100644 --- a/examples/lookup/lookup-stats.cpp +++ b/examples/lookup/lookup-stats.cpp @@ -30,12 +30,11 @@ int main(int argc, char ** argv){ // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_context_ptr & ctx = llama_init.context; // tokenize the prompt std::vector inp; - inp = common_tokenize(ctx, params.prompt, true, true); + inp = common_tokenize(ctx.get(), params.prompt, true, true); common_ngram_cache ngram_cache_context; common_ngram_cache ngram_cache_dynamic; @@ -66,7 +65,7 @@ int main(int argc, char ** argv){ } const int n_input = inp.size(); - const int n_ctx = llama_n_ctx(ctx); + const int n_ctx = llama_n_ctx(ctx.get()); int n_drafted = 0; int n_accept = 0; @@ -150,9 +149,6 @@ int main(int argc, char ** argv){ LOG_INF("n_accept = %d\n", n_accept); LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index 4d92bb238..0d68b80b9 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -33,8 +33,8 @@ int main(int argc, char ** argv){ // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); // tokenize the prompt std::vector inp; @@ -243,9 +243,6 @@ int main(int 
argc, char ** argv){ llama_batch_free(batch_tgt); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/main/main.cpp b/examples/main/main.cpp index d0c28f317..b5e477f5b 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -145,18 +145,18 @@ int main(int argc, char ** argv) { llama_context * ctx = nullptr; common_sampler * smpl = nullptr; - std::vector chat_msgs; - g_model = &model; g_ctx = &ctx; g_smpl = &smpl; + std::vector chat_msgs; + // load the model and apply lora adapter, if any LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__); common_init_result llama_init = common_init_from_params(params); - model = llama_init.model; - ctx = llama_init.context; + model = llama_init.model.get(); + ctx = llama_init.context.get(); if (model == NULL) { LOG_ERR("%s: error: unable to load model\n", __func__); @@ -889,9 +889,6 @@ int main(int argc, char ** argv) { common_sampler_free(smpl); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); ggml_threadpool_free_fn(threadpool); diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index fd2b1c011..d48f51975 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -132,8 +132,8 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); // load the prompts from an external file if there are any if (params.prompt.empty()) { @@ -416,9 +416,6 @@ int main(int argc, char ** argv) { llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 64a84607c..6bdc57f8e 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -1987,8 +1987,9 @@ int main(int argc, char ** argv) { // load the model and apply lora adapter, if any common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); + if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); return 1; @@ -2023,9 +2024,6 @@ int main(int argc, char ** argv) { LOG("\n"); llama_perf_context_print(ctx); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); return 0; diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index 912caf346..ab91d0b40 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -1,7 +1,7 @@ -#include "common.h" #include "ggml.h" #include "llama.h" -#include "llama-impl.h" +#include "llama-context.h" +#include "common.h" #include #include @@ -9,11 +9,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -330,13 +328,13 @@ int main(int argc, char ** argv) { } } - const auto &tensors = llama_internal_get_tensor_map(ctx); + const auto & tensors = llama_internal_get_tensor_map(ctx); // check layer tensors int included_layers = 0; int64_t max_nelements = 0; bool is_f16 = false; - for (const auto& kv_tensor : tensors) { + for (const auto & kv_tensor : tensors) { if 
(!layer_included(params, kv_tensor.first)) { continue; } @@ -371,8 +369,8 @@ int main(int argc, char ** argv) { if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) { continue; } - const auto * qfns = ggml_get_type_traits(type); - const auto * qfns_cpu = ggml_get_type_traits_cpu(type); + const auto * qfns = ggml_get_type_traits(type); + const auto * qfns_cpu = ggml_get_type_traits_cpu(type); if (qfns_cpu->from_float && qfns->to_float) { if (params.verbose) { printf("testing %s ...\n", ggml_type_name(type)); @@ -382,7 +380,7 @@ int main(int argc, char ** argv) { error_stats global_stats {}; - for (const auto& kv_tensor : tensors) { + for (const auto & kv_tensor : tensors) { if (!layer_included(params, kv_tensor.first)) { continue; } diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp index a5c6fe7e5..f534b5eff 100644 --- a/examples/retrieval/retrieval.cpp +++ b/examples/retrieval/retrieval.cpp @@ -151,8 +151,8 @@ int main(int argc, char ** argv) { // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); @@ -298,7 +298,5 @@ int main(int argc, char ** argv) { // clean up llama_batch_free(query_batch); - llama_free(ctx); - llama_free_model(model); llama_backend_free(); } diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 2f0cf9baa..cd03661cf 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -30,8 +30,8 @@ int main(int argc, char ** argv) { // init common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); if (model == nullptr || ctx == nullptr) { fprintf(stderr, "%s : failed to init\n", __func__); @@ -89,8 +89,6 @@ int main(int argc, char ** argv) { if (llama_decode(ctx, batch)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); return 1; } n_past += 1; @@ -98,11 +96,8 @@ int main(int argc, char ** argv) { printf("\n\n"); - // free old context - llama_free(ctx); - // make new context - auto * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params)); + llama_context * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params)); llama_sampler * smpl2 = llama_sampler_chain_init(sparams); @@ -123,8 +118,6 @@ int main(int argc, char ** argv) { if (read != llama_state_set_data(ctx2, state_mem.data(), state_mem.size())) { fprintf(stderr, "\n%s : failed to read state\n", __func__); - llama_free(ctx2); - llama_free_model(model); return 1; } @@ -148,8 +141,6 @@ int main(int argc, char ** argv) { if (llama_decode(ctx2, batch)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_batch_free(batch); - llama_free(ctx2); - llama_free_model(model); return 1; } n_past += 1; @@ -157,15 +148,13 @@ int main(int argc, char ** argv) { printf("\n\n"); - llama_free(ctx2); - if (result0 != result1) { fprintf(stderr, "\n%s : error : the 2 generations are different\n", __func__); return 
1; } // make new context - auto * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params)); + llama_context * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params)); llama_sampler * smpl3 = llama_sampler_chain_init(sparams); @@ -186,8 +175,6 @@ int main(int argc, char ** argv) { if (read != llama_state_set_data(ctx3, state_mem.data(), state_mem.size())) { fprintf(stderr, "\n%s : failed to read state\n", __func__); - llama_free(ctx3); - llama_free_model(model); return 1; } @@ -204,8 +191,6 @@ int main(int argc, char ** argv) { const size_t ncopy = llama_state_seq_get_data(ctx3, seq_store.data(), seq_store.size(), 0); if (ncopy != seq_store.size()) { fprintf(stderr, "\n%s : seq copy data length %zd does not match expected length %zd\n", __func__, ncopy, seq_store.size()); - llama_free(ctx3); - llama_free_model(model); return 1; } fprintf(stderr, "%s : seq 0 copied, %zd bytes\n", __func__, ncopy); @@ -218,8 +203,6 @@ int main(int argc, char ** argv) { const size_t nset = llama_state_seq_set_data(ctx3, seq_store.data(), seq_store.size(), 1); if (nset != seq_store.size()) { fprintf(stderr, "\n%s : seq set data length %zd does not match expected length %zd\n", __func__, nset, seq_store.size()); - llama_free(ctx3); - llama_free_model(model); return 1; } fprintf(stderr, "%s : seq 1 restored, %zd bytes\n", __func__, nset); @@ -239,8 +222,6 @@ int main(int argc, char ** argv) { if (llama_decode(ctx3, batch)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_batch_free(batch); - llama_free(ctx3); - llama_free_model(model); return 1; } n_past += 1; @@ -253,8 +234,6 @@ int main(int argc, char ** argv) { llama_sampler_free(smpl3); llama_batch_free(batch); - llama_free(ctx3); - llama_free_model(model); if (result0 != result2) { fprintf(stderr, "\n%s : error : the seq restore generation is different\n", __func__); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 5118084f1..c2e62ba69 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -98,7 +98,7 @@ struct slot_params { int64_t t_max_prompt_ms = -1; // TODO: implement int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit - std::vector lora; + std::vector lora; std::vector antiprompt; std::vector response_fields; @@ -198,7 +198,7 @@ struct server_task { bool metrics_reset_bucket = false; // used by SERVER_TASK_TYPE_SET_LORA - std::vector set_lora; + std::vector set_lora; server_task(server_task_type type) : type(type) {} @@ -206,7 +206,6 @@ struct server_task { const llama_model * model, const llama_context * ctx, const common_params & params_base, - const std::vector & lora_base, const json & data) { slot_params params; @@ -265,12 +264,12 @@ struct server_task { if (data.contains("lora")) { if (data.at("lora").is_array()) { - params.lora = parse_lora_request(lora_base, data.at("lora")); + params.lora = parse_lora_request(params_base.lora_adapters, data.at("lora")); } else { throw std::runtime_error("Error: 'lora' must be an array of objects with 'id' and 'scale' fields"); } } else { - params.lora = lora_base; + params.lora = params_base.lora_adapters; } // TODO: add more sanity checks for the input parameters @@ -1132,7 +1131,7 @@ struct server_slot { common_speculative * spec = nullptr; - std::vector lora; + std::vector lora; // the index relative to completion multi-task request size_t index = 0; @@ -1627,11 +1626,15 @@ struct server_response { struct server_context { common_params params_base; + 
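The server_context hunk continuing below replaces the manually freed llama_model * / llama_context * members with common_init_result objects stored in the struct itself, so the llama_model_ptr / llama_context_ptr smart pointers inside common_init_result own the lifetimes and the raw pointers become non-owning views (which is why the explicit llama_free / llama_free_model calls disappear from ~server_context). A minimal sketch of that ownership pattern, using stand-in types rather than the real llama.cpp API:

    // Sketch only: Model, Context, ModelDeleter, ContextDeleter, InitResult and
    // ServerContext are illustrative stand-ins for llama_model/llama_context,
    // llama_model_ptr/llama_context_ptr, common_init_result and server_context.
    #include <cstdio>
    #include <memory>

    struct Model   {};
    struct Context {};

    struct ModelDeleter   { void operator()(Model   * m) const { std::puts("free model");   delete m; } };
    struct ContextDeleter { void operator()(Context * c) const { std::puts("free context"); delete c; } };

    using ModelPtr   = std::unique_ptr<Model,   ModelDeleter>;
    using ContextPtr = std::unique_ptr<Context, ContextDeleter>;

    // plays the role of common_init_result: owns both objects
    struct InitResult {
        ModelPtr   model;
        ContextPtr context;
    };

    // plays the role of server_context: keeps InitResult alive, exposes raw views
    struct ServerContext {
        InitResult init;            // owns the lifetimes
        Model    * model = nullptr; // non-owning view
        Context  * ctx   = nullptr; // non-owning view

        bool load() {
            init.model.reset(new Model());
            init.context.reset(new Context());
            model = init.model.get();
            ctx   = init.context.get();
            return model != nullptr && ctx != nullptr;
        }
        // no custom destructor needed: init's unique_ptrs release everything,
        // context before model (members are destroyed in reverse declaration order)
    };

    int main() {
        ServerContext s;
        if (!s.load()) {
            return 1;
        }
        // ... serve requests through s.model / s.ctx ...
        return 0; // prints "free context" then "free model" on scope exit
    }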
// note: keep these alive - they determine the lifetime of the model, context, etc. + common_init_result llama_init; + common_init_result llama_init_dft; + llama_model * model = nullptr; llama_context * ctx = nullptr; - std::vector lora; llama_model * model_dft = nullptr; + llama_context_params cparams_dft; llama_batch batch = {}; @@ -1655,21 +1658,6 @@ struct server_context { float slot_prompt_similarity = 0.0f; ~server_context() { - if (ctx) { - llama_free(ctx); - ctx = nullptr; - } - - if (model) { - llama_free_model(model); - model = nullptr; - } - - if (model_dft) { - llama_free_model(model_dft); - model_dft = nullptr; - } - // Clear any sampling context for (server_slot & slot : slots) { common_sampler_free(slot.smpl); @@ -1692,11 +1680,10 @@ struct server_context { params_base = params; - common_init_result llama_init = common_init_from_params(params_base); + llama_init = common_init_from_params(params_base); - model = llama_init.model; - ctx = llama_init.context; - lora = llama_init.lora_adapters; + model = llama_init.model.get(); + ctx = llama_init.context.get(); if (model == nullptr) { SRV_ERR("failed to load model, '%s'\n", params_base.model.c_str()); @@ -1719,25 +1706,22 @@ struct server_context { params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers; params_dft.n_parallel = 1; - common_init_result llama_init_dft = common_init_from_params(params_dft); + llama_init_dft = common_init_from_params(params_dft); - model_dft = llama_init_dft.model; + model_dft = llama_init_dft.model.get(); if (model_dft == nullptr) { SRV_ERR("failed to load draft model, '%s'\n", params_base.speculative.model.c_str()); return false; } - if (!common_speculative_are_compatible(ctx, llama_init_dft.context)) { + if (!common_speculative_are_compatible(ctx, llama_init_dft.context.get())) { SRV_ERR("the draft model '%s' is not compatible with the target model '%s'\n", params_base.speculative.model.c_str(), params_base.model.c_str()); - llama_free (llama_init_dft.context); - llama_free_model(llama_init_dft.model); - return false; } - const int n_ctx_dft = llama_n_ctx(llama_init_dft.context); + const int n_ctx_dft = llama_n_ctx(llama_init_dft.context.get()); cparams_dft = common_context_params_to_llama(params_dft); cparams_dft.n_batch = n_ctx_dft; @@ -1745,9 +1729,6 @@ struct server_context { // force F16 KV cache for the draft model for extra performance cparams_dft.type_k = GGML_TYPE_F16; cparams_dft.type_v = GGML_TYPE_F16; - - // the context is not needed - we will create one for each slot - llama_free(llama_init_dft.context); } return true; @@ -1898,7 +1879,7 @@ struct server_context { if (!are_lora_equal(task.params.lora, slot.lora)) { // if lora is changed, we cannot reuse cached tokens slot.cache_tokens.clear(); - slot.lora = std::move(task.params.lora); + slot.lora = task.params.lora; } SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); @@ -2592,7 +2573,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SET_LORA: { - lora = std::move(task.set_lora); + params_base.lora_adapters = std::move(task.set_lora); auto res = std::make_unique(); res->id = task.id; queue_results.send(std::move(res)); @@ -3671,7 +3652,6 @@ int main(int argc, char ** argv) { ctx_server.model, ctx_server.ctx, ctx_server.params_base, - ctx_server.lora, data); task.id_selected_slot = json_value(data, "id_slot", -1); @@ -4098,8 +4078,9 @@ int main(int argc, char ** argv) { const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) { json result = 
json::array(); - for (size_t i = 0; i < ctx_server.lora.size(); ++i) { - auto & lora = ctx_server.lora[i]; + const auto & loras = ctx_server.params_base.lora_adapters; + for (size_t i = 0; i < loras.size(); ++i) { + auto & lora = loras[i]; result.push_back({ {"id", i}, {"path", lora.path}, @@ -4118,7 +4099,7 @@ int main(int argc, char ** argv) { } server_task task(SERVER_TASK_TYPE_SET_LORA); task.id = ctx_server.queue_tasks.get_new_id(); - task.set_lora = parse_lora_request(ctx_server.lora, body); + task.set_lora = parse_lora_request(ctx_server.params_base.lora_adapters, body); ctx_server.queue_results.add_waiting_task_id(task.id); ctx_server.queue_tasks.post(task); diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 1cf08bb0a..dc6e6e67e 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -799,25 +799,25 @@ static std::vector get_token_probabilities(llama_context * ctx } static bool are_lora_equal( - const std::vector & l1, - const std::vector & l2) { + const std::vector & l1, + const std::vector & l2) { if (l1.size() != l2.size()) { return false; } for (size_t i = 0; i < l1.size(); ++i) { // we don't check lora.path to reduce the time complexity - if (l1[i].scale != l2[i].scale || l1[i].adapter != l2[i].adapter) { + if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) { return false; } } return true; } -// parse lora config from JSON request, returned a copy of base_lora with updated scale -static std::vector parse_lora_request( - const std::vector & base_lora, +// parse lora config from JSON request, returned a copy of lora_base with updated scale +static std::vector parse_lora_request( + const std::vector & lora_base, const json & data) { - std::vector lora(base_lora); + std::vector lora(lora_base); int max_idx = lora.size(); // clear existing value diff --git a/examples/speculative-simple/speculative-simple.cpp b/examples/speculative-simple/speculative-simple.cpp index 8ca84f7af..9070c3512 100644 --- a/examples/speculative-simple/speculative-simple.cpp +++ b/examples/speculative-simple/speculative-simple.cpp @@ -34,7 +34,7 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); llama_model * model_tgt = NULL; - llama_model * model_dft = NULL; + //llama_model * model_dft = NULL; llama_context * ctx_tgt = NULL; llama_context * ctx_dft = NULL; @@ -42,8 +42,8 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init_tgt = common_init_from_params(params); - model_tgt = llama_init_tgt.model; - ctx_tgt = llama_init_tgt.context; + model_tgt = llama_init_tgt.model.get(); + ctx_tgt = llama_init_tgt.context.get(); // load the draft model params.devices = params.speculative.devices; @@ -59,8 +59,8 @@ int main(int argc, char ** argv) { params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; common_init_result llama_init_dft = common_init_from_params(params); - model_dft = llama_init_dft.model; - ctx_dft = llama_init_dft.context; + //model_dft = llama_init_dft.model.get(); + ctx_dft = llama_init_dft.context.get(); if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) { return 1; @@ -251,12 +251,6 @@ int main(int argc, char ** argv) { common_sampler_free(smpl); common_speculative_free(spec); - llama_free(ctx_tgt); - llama_free_model(model_tgt); - - llama_free(ctx_dft); - llama_free_model(model_dft); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index d4ad9751e..bc0b6813b 100644 --- 
a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -72,8 +72,9 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init_tgt = common_init_from_params(params); - model_tgt = llama_init_tgt.model; - ctx_tgt = llama_init_tgt.context; + + model_tgt = llama_init_tgt.model.get(); + ctx_tgt = llama_init_tgt.context.get(); // load the draft model params.devices = params.speculative.devices; @@ -85,8 +86,9 @@ int main(int argc, char ** argv) { params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; common_init_result llama_init_dft = common_init_from_params(params); - model_dft = llama_init_dft.model; - ctx_dft = llama_init_dft.context; + + model_dft = llama_init_dft.model.get(); + ctx_dft = llama_init_dft.context.get(); const bool vocab_type_tgt = llama_vocab_type(model_tgt); LOG_DBG("vocab_type tgt: %d\n", vocab_type_tgt); @@ -631,12 +633,6 @@ int main(int argc, char ** argv) { llama_batch_free(batch_dft); - llama_free(ctx_tgt); - llama_free_model(model_tgt); - - llama_free(ctx_dft); - llama_free_model(model_dft); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/tts/tts.cpp b/examples/tts/tts.cpp index 7f36b80f0..522f5e881 100644 --- a/examples/tts/tts.cpp +++ b/examples/tts/tts.cpp @@ -458,8 +458,9 @@ int main(int argc, char ** argv) { llama_context * ctx_cts = NULL; common_init_result llama_init_ttc = common_init_from_params(params); - model_ttc = llama_init_ttc.model; - ctx_ttc = llama_init_ttc.context; + + model_ttc = llama_init_ttc.model.get(); + ctx_ttc = llama_init_ttc.context.get(); // TODO: refactor in a common struct params.model = params.vocoder.model; @@ -470,8 +471,9 @@ int main(int argc, char ** argv) { params.embedding = true; common_init_result llama_init_cts = common_init_from_params(params); - model_cts = llama_init_cts.model; - ctx_cts = llama_init_cts.context; + + model_cts = llama_init_cts.model.get(); + ctx_cts = llama_init_cts.context.get(); std::vector smpl(n_parallel); for (int i = 0; i < n_parallel; ++i) { @@ -920,12 +922,6 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 LOG_INF("%s: audio written to file '%s'\n", __func__, fname.c_str()); - llama_free(ctx_ttc); - llama_free_model(model_ttc); - - llama_free(ctx_cts); - llama_free_model(model_cts); - llama_backend_free(); return 0; diff --git a/include/llama-cpp.h b/include/llama-cpp.h index daa04d4d8..1500cb2fc 100644 --- a/include/llama-cpp.h +++ b/include/llama-cpp.h @@ -20,6 +20,11 @@ struct llama_sampler_deleter { void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } }; +struct llama_lora_adapter_deleter { + void operator()(llama_lora_adapter * lora_adapter) { llama_lora_adapter_free(lora_adapter); } +}; + typedef std::unique_ptr llama_model_ptr; typedef std::unique_ptr llama_context_ptr; typedef std::unique_ptr llama_sampler_ptr; +typedef std::unique_ptr llama_lora_adapter_ptr; diff --git a/include/llama.h b/include/llama.h index a4abf395b..7b305b299 100644 --- a/include/llama.h +++ b/include/llama.h @@ -385,6 +385,7 @@ extern "C" { } llama_chat_message; // lora adapter + // TODO: rename to llama_adapter_lora struct llama_lora_adapter; // Helpers for getting default parameters @@ -416,6 +417,7 @@ extern "C" { const char * path_model, struct llama_model_params params); + // TODO: rename to llama_model_free LLAMA_API void llama_free_model(struct llama_model * model); // TODO: rename to llama_init_from_model @@ -501,14 +503,19 @@ extern "C" { const char * 
fname_out, const llama_model_quantize_params * params); + // + // Adapters + // + // Load a LoRA adapter from file - // The loaded adapter will be associated to the given model, and will be free when the model is deleted + // TODO: rename to llama_adapter_lora_init LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( struct llama_model * model, const char * path_lora); // Add a loaded LoRA adapter to given context // This will not modify model's weight + // TODO: rename to llama_set_adapter_lora LLAMA_API int32_t llama_lora_adapter_set( struct llama_context * ctx, struct llama_lora_adapter * adapter, @@ -516,16 +523,18 @@ extern "C" { // Remove a specific LoRA adapter from given context // Return -1 if the adapter is not present in the context + // TODO: rename to llama_rm_adapter_lora LLAMA_API int32_t llama_lora_adapter_remove( struct llama_context * ctx, struct llama_lora_adapter * adapter); // Remove all LoRA adapters from given context - LLAMA_API void llama_lora_adapter_clear( - struct llama_context * ctx); + // TODO: rename to llama_clear_adapter_lora + LLAMA_API void llama_lora_adapter_clear(struct llama_context * ctx); // Manually free a LoRA adapter // Note: loaded adapters will be free when the associated model is deleted + // TODO: rename to llama_adapter_lora_free LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); // Apply a loaded control vector to a llama_context, or if data is NULL, clear @@ -534,6 +543,7 @@ extern "C" { // to an n_embd x n_layers buffer starting from layer 1. // il_start and il_end are the layer range the vector should apply to (both inclusive) // See llama_control_vector_load in common to load a control vector. + // TODO: rename to llama_adapter_cvec_apply LLAMA_API int32_t llama_control_vector_apply( struct llama_context * lctx, const float * data, @@ -546,6 +556,8 @@ extern "C" { // KV cache // + // TODO: remove llama_kv_cache_view_* API + // Information associated with an individual cell in the KV cache view. struct llama_kv_cache_view_cell { // The position for this cell. Takes KV cache shifts into account. @@ -592,8 +604,11 @@ extern "C" { LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes) + // TODO: change signature to llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_context * ctx) LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); + /// + // Returns the number of tokens in the KV cache (slow, use only for debug) // If a KV cell has multiple sequences assigned to it, it will be counted multiple times LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); @@ -663,6 +678,9 @@ extern "C" { struct llama_context * ctx, llama_seq_id seq_id); + // TODO: the llama_kv_cache_defrag and llama_kv_cache_update API tightly couples llama_context with llama_kv_cache + // how to avoid this? 
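The llama.h changes above tag the adapter entry points with their planned llama_adapter_* names but keep the existing signatures. Below is a minimal usage sketch of the current (pre-rename) C API, assuming the llama_lora_adapter_ptr typedef added to llama-cpp.h earlier in this patch; the model and adapter paths are placeholders and error handling is kept to a minimum:

    // Sketch only: exercises llama_lora_adapter_init/set/remove through the
    // smart-pointer typedefs from llama-cpp.h; "model.gguf" and "adapter.gguf"
    // are placeholder paths, not files shipped with the project.
    #include "llama.h"
    #include "llama-cpp.h"

    #include <cstdio>

    int main() {
        llama_backend_init();
        {
            llama_model_ptr model(llama_load_model_from_file("model.gguf", llama_model_default_params()));
            if (!model) {
                std::fprintf(stderr, "failed to load model\n");
                return 1;
            }

            llama_context_ptr ctx(llama_new_context_with_model(model.get(), llama_context_default_params()));

            // the unique_ptr typedef calls llama_lora_adapter_free for us
            llama_lora_adapter_ptr adapter(llama_lora_adapter_init(model.get(), "adapter.gguf"));
            if (!adapter) {
                std::fprintf(stderr, "failed to load adapter\n");
                return 1;
            }

            // attach with a scale of 1.0f, then detach again
            llama_lora_adapter_set   (ctx.get(), adapter.get(), 1.0f);
            llama_lora_adapter_remove(ctx.get(), adapter.get());
        } // adapter, ctx and model are released here, in reverse construction order
        llama_backend_free();
        return 0;
    }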
+ // Defragment the KV cache // This will be applied: // - lazily on next llama_decode() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2d3ea0994..aeb75bf3e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -9,9 +9,21 @@ llama_add_compile_flags() add_library(llama ../include/llama.h llama.cpp - llama-vocab.cpp + llama-adapter.cpp + llama-arch.cpp + llama-batch.cpp + llama-chat.cpp + llama-context.cpp llama-grammar.cpp + llama-hparams.cpp + llama-impl.cpp + llama-kv-cache.cpp + llama-mmap.cpp + llama-model-loader.cpp + llama-model.cpp + llama-quant.cpp llama-sampling.cpp + llama-vocab.cpp unicode.h unicode.cpp unicode-data.cpp diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp new file mode 100644 index 000000000..9fd7edea3 --- /dev/null +++ b/src/llama-adapter.cpp @@ -0,0 +1,334 @@ +#include "llama-adapter.h" + +#include "llama-model.h" + +#include +#include +#include +#include + +// vec + +struct ggml_tensor * llama_control_vector::tensor_for(int il) const { + if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) { + return nullptr; + } + + return tensors[il]; +} + +struct ggml_tensor * llama_control_vector::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const { + ggml_tensor * layer_dir = tensor_for(il); + if (layer_dir != nullptr) { + cur = ggml_add(ctx, cur, layer_dir); + } + + return cur; +} + +static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) { + const auto & hparams = model.hparams; + + GGML_ASSERT(cvec.tensors.empty()); + GGML_ASSERT(cvec.ctxs.empty()); + GGML_ASSERT(cvec.bufs.empty()); + + // create a context for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + struct ggml_init_params params = { + /*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + ggml_context * ctx = ggml_init(params); + if (!ctx) { + return nullptr; + } + + ctx_map[buft] = ctx; + cvec.ctxs.emplace_back(ctx); + + return ctx; + } + + return it->second; + }; + + // make tensors + cvec.tensors.reserve(hparams.n_layer); + cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0 + for (size_t il = 1; il < hparams.n_layer; il++) { + ggml_backend_buffer_type_t buft = llama_model_select_buft(model, il); + ggml_context * ctx = ctx_for_buft(buft); + if (!ctx) { + LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__); + return false; + } + ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd); + cvec.tensors.push_back(tensor); + } + + // allocate tensors / buffers and zero + cvec.bufs.reserve(ctx_map.size()); + for (auto it : ctx_map) { + ggml_backend_buffer_type_t buft = it.first; + ggml_context * ctx = it.second; + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__); + return false; + } + ggml_backend_buffer_clear(buf, 0); + cvec.bufs.emplace_back(buf); + } + + return true; +} + +int32_t llama_control_vector_apply( + struct llama_control_vector & cvec, + const llama_model & model, + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end) { + const auto & hparams = model.hparams; + + if (data == nullptr) { + // disable the current control vector (but leave allocated for later) + 
cvec.layer_start = -1; + cvec.layer_end = -1; + return 0; + } + + if (n_embd != (int) hparams.n_embd) { + LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__); + return 1; + } + + if (cvec.tensors.empty()) { + if (!llama_control_vector_init(cvec, model)) { + return 1; + } + } + + cvec.layer_start = il_start; + cvec.layer_end = il_end; + + for (size_t il = 1; il < hparams.n_layer; il++) { + assert(cvec.tensors[il] != nullptr); + + const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present + if (off + n_embd <= len) { + ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il])); + } + } + + return 0; +} + +// lora + +llama_lora_weight * llama_lora_adapter::get_weight(struct ggml_tensor * w) { + const std::string name(w->name); + + const auto pos = ab_map.find(name); + if (pos != ab_map.end()) { + return &pos->second; + } + + return nullptr; +} + +void llama_lora_adapter_free(struct llama_lora_adapter * adapter) { + delete adapter; +} + +static void llama_lora_adapter_init_impl(struct llama_model & model, const char * path_lora, struct llama_lora_adapter & adapter) { + LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora); + + ggml_context * ctx_init; + struct gguf_init_params meta_gguf_params = { + /* .no_alloc = */ true, + /* .ctx = */ &ctx_init, + }; + + gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) }; + if (!ctx_gguf) { + throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora)); + } + + ggml_context_ptr ctx { ctx_init }; + + // check metadata + { + auto get_kv_str = [&](const std::string & key) -> std::string { + int id = gguf_find_key(ctx_gguf.get(), key.c_str()); + return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id)); + }; + auto get_kv_f32 = [&](const std::string & key) -> float { + int id = gguf_find_key(ctx_gguf.get(), key.c_str()); + return id < 0 ? 
0.0f : gguf_get_val_f32(ctx_gguf.get(), id); + }; + LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN); + + auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE)); + if (general_type != "adapter") { + throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type); + } + + auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE)); + auto general_arch = llm_arch_from_string(general_arch_str); + if (general_arch != model.arch) { + throw std::runtime_error("model arch and LoRA arch mismatch"); + } + + auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE)); + if (adapter_type != "lora") { + throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type); + } + + adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA)); + } + + int n_tensors = gguf_get_n_tensors(ctx_gguf.get()); + + // contexts for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + // add a new context + struct ggml_init_params params = { + /*.mem_size =*/ n_tensors*ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + ggml_context * buft_ctx = ggml_init(params); + if (!buft_ctx) { + return nullptr; + } + ctx_map[buft] = buft_ctx; + adapter.ctxs.emplace_back(buft_ctx); + return buft_ctx; + }; + return it->second; + }; + + // bundle lora_a and lora_b into pairs + std::map ab_map; + auto str_endswith = [](const std::string & str, const std::string & suffix) { + return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0; + }; + + for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) { + std::string name(cur->name); + if (str_endswith(name, ".lora_a")) { + replace_all(name, ".lora_a", ""); + if (ab_map.find(name) == ab_map.end()) { + ab_map[name] = llama_lora_weight(cur, nullptr); + } else { + ab_map[name].a = cur; + } + } else if (str_endswith(name, ".lora_b")) { + replace_all(name, ".lora_b", ""); + if (ab_map.find(name) == ab_map.end()) { + ab_map[name] = llama_lora_weight(nullptr, cur); + } else { + ab_map[name].b = cur; + } + } else { + throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix"); + } + } + + // add tensors + for (auto & it : ab_map) { + const std::string & name = it.first; + llama_lora_weight & w = it.second; + + if (!w.a || !w.b) { + throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component"); + } + + // device buft and device ctx + auto * model_tensor = llama_model_get_tensor(model, name.c_str()); + if (!model_tensor) { + throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model"); + } + + struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer)); + // validate tensor shape + if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) { + throw std::runtime_error("tensor '" + name + "' has incorrect shape"); + } + if (w.a->ne[1] != w.b->ne[0]) { + throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)"); + } + + // save tensor to adapter + struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a); + struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b); + ggml_set_name(tensor_a, w.a->name); + ggml_set_name(tensor_b, w.b->name); + adapter.ab_map[name] = llama_lora_weight(tensor_a, tensor_b); 
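The loading code in this hunk pairs every "<name>.lora_a" tensor with its "<name>.lora_b" counterpart and rejects adapters whose shapes do not compose with the base weight (W: [n_in, n_out], A: [n_in, r], B: [r, n_out]). A compact, self-contained sketch of that pairing and the shape checks, with plain structs standing in for ggml tensors (FakeTensor, pair_lora_tensors and validate_pair are illustrative names, not part of the patch):

    // Sketch only: mirrors the suffix-based pairing and shape validation of
    // llama_lora_adapter_init_impl using plain structs instead of ggml tensors.
    #include <cstdint>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct FakeTensor {
        std::string name;
        int64_t     ne[2]; // ne[0] = input dim, ne[1] = output dim, as in ggml
    };

    struct LoraPair {
        const FakeTensor * a = nullptr; // <name>.lora_a : [n_in, r]
        const FakeTensor * b = nullptr; // <name>.lora_b : [r, n_out]
    };

    static bool ends_with(const std::string & s, const std::string & suffix) {
        return s.size() >= suffix.size() &&
               s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
    }

    // group "<base>.lora_a" / "<base>.lora_b" tensors under their base name
    static std::map<std::string, LoraPair> pair_lora_tensors(const std::vector<FakeTensor> & tensors) {
        std::map<std::string, LoraPair> ab_map;
        for (const FakeTensor & t : tensors) {
            std::string base = t.name;
            if (ends_with(base, ".lora_a")) {
                base.resize(base.size() - 7);
                ab_map[base].a = &t;
            } else if (ends_with(base, ".lora_b")) {
                base.resize(base.size() - 7);
                ab_map[base].b = &t;
            } else {
                throw std::runtime_error("LoRA tensor '" + t.name + "' has unexpected suffix");
            }
        }
        return ab_map;
    }

    // mirror the loader's checks: W is [n_in, n_out], A is [n_in, r], B is [r, n_out]
    static void validate_pair(const FakeTensor & w, const LoraPair & p) {
        if (!p.a || !p.b) {
            throw std::runtime_error("LoRA pair for '" + w.name + "' is missing one component");
        }
        if (w.ne[0] != p.a->ne[0] || w.ne[1] != p.b->ne[1]) {
            throw std::runtime_error("tensor '" + w.name + "' has incorrect shape");
        }
        if (p.a->ne[1] != p.b->ne[0]) {
            throw std::runtime_error("lora_a tensor is not transposed");
        }
    }

    int main() {
        const std::vector<FakeTensor> tensors = {
            { "blk.0.attn_q.lora_a", { 4096,   16 } },
            { "blk.0.attn_q.lora_b", {   16, 4096 } },
        };
        const FakeTensor w = { "blk.0.attn_q", { 4096, 4096 } };

        const auto ab_map = pair_lora_tensors(tensors);
        validate_pair(w, ab_map.at("blk.0.attn_q")); // throws on malformed adapters
        return 0;
    }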
+ } + + // allocate tensors / buffers and zero + { + adapter.ctxs.reserve(ctx_map.size()); + adapter.bufs.reserve(ctx_map.size()); + for (auto & it : ctx_map) { + ggml_backend_buffer_type_t buft = it.first; + ggml_context * ctx_dev = it.second; + ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) }; + if (!buf) { + throw std::runtime_error("failed to allocate buffer for lora adapter\n"); + } + LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0); + adapter.bufs.emplace_back(std::move(buf)); + } + } + + // set tensor data + { + llama_file gguf_file(path_lora, "rb"); + std::vector read_buf; + auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) { + size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name)); + size_t size = ggml_nbytes(orig); + read_buf.resize(size); + gguf_file.seek(offs, SEEK_SET); + gguf_file.read_raw(read_buf.data(), size); + ggml_backend_tensor_set(dev, read_buf.data(), 0, size); + }; + for (auto & it : adapter.ab_map) { + auto orig = ab_map[it.first]; + auto dev = it.second; + set_tensor(orig.a, dev.a); + set_tensor(orig.b, dev.b); + } + } + + LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2); +} + +struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) { + struct llama_lora_adapter * adapter = new llama_lora_adapter(); + + try { + llama_lora_adapter_init_impl(*model, path_lora, *adapter); + return adapter; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what()); + + delete adapter; + } + + return nullptr; +} diff --git a/src/llama-adapter.h b/src/llama-adapter.h new file mode 100644 index 000000000..5f1870cc8 --- /dev/null +++ b/src/llama-adapter.h @@ -0,0 +1,66 @@ +#pragma once + +#include "llama-impl.h" +#include "llama-hparams.h" + +#include "ggml-cpp.h" + +#include +#include + +// +// llama_adapter_cvec +// + +// TODO: rename to llama_adapter_cvec +struct llama_control_vector { + std::vector ctxs; + std::vector bufs; + + std::vector tensors; // per layer + + int32_t layer_start = -1; + int32_t layer_end = -1; + + struct ggml_tensor * tensor_for(int il) const; + + struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const; +}; + +int32_t llama_control_vector_apply( + struct llama_control_vector & cvec, + const llama_model & model, + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end); + +// +// llama_adapter_lora +// + +// TODO: rename to llama_adapter_lora_weight +struct llama_lora_weight { + struct ggml_tensor * a = nullptr; + struct ggml_tensor * b = nullptr; + + llama_lora_weight() = default; + llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} +}; + +// TODO: rename to llama_adapter_lora +struct llama_lora_adapter { + // map tensor name to lora_a_b + std::unordered_map ab_map; + + std::vector ctxs; + std::vector bufs; + + float alpha; + + llama_lora_adapter() = default; + ~llama_lora_adapter() = default; + + llama_lora_weight * get_weight(struct ggml_tensor * w); +}; diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp new file mode 100644 index 000000000..a60038385 --- /dev/null +++ b/src/llama-arch.cpp @@ -0,0 +1,1414 @@ +#include "llama-arch.h" + +#include 
"llama-impl.h" + +#include + +static const std::map LLM_ARCH_NAMES = { + { LLM_ARCH_LLAMA, "llama" }, + { LLM_ARCH_DECI, "deci" }, + { LLM_ARCH_FALCON, "falcon" }, + { LLM_ARCH_GROK, "grok" }, + { LLM_ARCH_GPT2, "gpt2" }, + { LLM_ARCH_GPTJ, "gptj" }, + { LLM_ARCH_GPTNEOX, "gptneox" }, + { LLM_ARCH_MPT, "mpt" }, + { LLM_ARCH_BAICHUAN, "baichuan" }, + { LLM_ARCH_STARCODER, "starcoder" }, + { LLM_ARCH_REFACT, "refact" }, + { LLM_ARCH_BERT, "bert" }, + { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, + { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, + { LLM_ARCH_BLOOM, "bloom" }, + { LLM_ARCH_STABLELM, "stablelm" }, + { LLM_ARCH_QWEN, "qwen" }, + { LLM_ARCH_QWEN2, "qwen2" }, + { LLM_ARCH_QWEN2MOE, "qwen2moe" }, + { LLM_ARCH_QWEN2VL, "qwen2vl" }, + { LLM_ARCH_PHI2, "phi2" }, + { LLM_ARCH_PHI3, "phi3" }, + { LLM_ARCH_PLAMO, "plamo" }, + { LLM_ARCH_CODESHELL, "codeshell" }, + { LLM_ARCH_ORION, "orion" }, + { LLM_ARCH_INTERNLM2, "internlm2" }, + { LLM_ARCH_MINICPM, "minicpm" }, + { LLM_ARCH_MINICPM3, "minicpm3" }, + { LLM_ARCH_GEMMA, "gemma" }, + { LLM_ARCH_GEMMA2, "gemma2" }, + { LLM_ARCH_STARCODER2, "starcoder2" }, + { LLM_ARCH_MAMBA, "mamba" }, + { LLM_ARCH_XVERSE, "xverse" }, + { LLM_ARCH_COMMAND_R, "command-r" }, + { LLM_ARCH_DBRX, "dbrx" }, + { LLM_ARCH_OLMO, "olmo" }, + { LLM_ARCH_OLMO2, "olmo2" }, + { LLM_ARCH_OLMOE, "olmoe" }, + { LLM_ARCH_OPENELM, "openelm" }, + { LLM_ARCH_ARCTIC, "arctic" }, + { LLM_ARCH_DEEPSEEK, "deepseek" }, + { LLM_ARCH_DEEPSEEK2, "deepseek2" }, + { LLM_ARCH_CHATGLM, "chatglm" }, + { LLM_ARCH_BITNET, "bitnet" }, + { LLM_ARCH_T5, "t5" }, + { LLM_ARCH_T5ENCODER, "t5encoder" }, + { LLM_ARCH_JAIS, "jais" }, + { LLM_ARCH_NEMOTRON, "nemotron" }, + { LLM_ARCH_EXAONE, "exaone" }, + { LLM_ARCH_RWKV6, "rwkv6" }, + { LLM_ARCH_GRANITE, "granite" }, + { LLM_ARCH_GRANITE_MOE, "granitemoe" }, + { LLM_ARCH_CHAMELEON, "chameleon" }, + { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, + { LLM_ARCH_UNKNOWN, "(unknown)" }, +}; + +static const std::map LLM_KV_NAMES = { + { LLM_KV_GENERAL_TYPE, "general.type" }, + { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" }, + { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" }, + { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" }, + { LLM_KV_GENERAL_NAME, "general.name" }, + { LLM_KV_GENERAL_AUTHOR, "general.author" }, + { LLM_KV_GENERAL_VERSION, "general.version" }, + { LLM_KV_GENERAL_URL, "general.url" }, + { LLM_KV_GENERAL_DESCRIPTION, "general.description" }, + { LLM_KV_GENERAL_LICENSE, "general.license" }, + { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" }, + { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" }, + + { LLM_KV_VOCAB_SIZE, "%s.vocab_size" }, + { LLM_KV_CONTEXT_LENGTH, "%s.context_length" }, + { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" }, + { LLM_KV_FEATURES_LENGTH, "%s.features_length" }, + { LLM_KV_BLOCK_COUNT, "%s.block_count" }, + { LLM_KV_LEADING_DENSE_BLOCK_COUNT, "%s.leading_dense_block_count" }, + { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" }, + { LLM_KV_EXPERT_FEED_FORWARD_LENGTH, "%s.expert_feed_forward_length" }, + { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" }, + { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" }, + { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" }, + { LLM_KV_EXPERT_COUNT, "%s.expert_count" }, + { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" }, + { LLM_KV_EXPERT_SHARED_COUNT, "%s.expert_shared_count" }, + { LLM_KV_EXPERT_WEIGHTS_SCALE, "%s.expert_weights_scale" }, + { LLM_KV_POOLING_TYPE, 
"%s.pooling_type" }, + { LLM_KV_LOGIT_SCALE, "%s.logit_scale" }, + { LLM_KV_DECODER_START_TOKEN_ID, "%s.decoder_start_token_id" }, + { LLM_KV_ATTN_LOGIT_SOFTCAPPING, "%s.attn_logit_softcapping" }, + { LLM_KV_FINAL_LOGIT_SOFTCAPPING, "%s.final_logit_softcapping" }, + { LLM_KV_SWIN_NORM, "%s.swin_norm" }, + { LLM_KV_RESCALE_EVERY_N_LAYERS, "%s.rescale_every_n_layers" }, + { LLM_KV_TIME_MIX_EXTRA_DIM, "%s.time_mix_extra_dim" }, + { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" }, + { LLM_KV_RESIDUAL_SCALE, "%s.residual_scale" }, + { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" }, + + { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, + { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, + { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" }, + { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" }, + { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" }, + { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, + { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, + { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_EPS, "%s.attention.group_norm_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_GROUPS, "%s.attention.group_norm_groups" }, + { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, + { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, + { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, + { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, + { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, + { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, + + { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, + { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" }, + { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" }, + { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" }, + { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" }, + { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" }, + { LLM_KV_ROPE_SCALING_ATTN_FACTOR, "%s.rope.scaling.attn_factor" }, + { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" }, + { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" }, + { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier" }, + + { LLM_KV_SPLIT_NO, "split.no" }, + { LLM_KV_SPLIT_COUNT, "split.count" }, + { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" }, + + { LLM_KV_SSM_CONV_KERNEL, "%s.ssm.conv_kernel" }, + { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" }, + { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" }, + { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" }, + { LLM_KV_SSM_DT_B_C_RMS, "%s.ssm.dt_b_c_rms" }, + + { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" }, + + { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" }, + { LLM_KV_POSNET_BLOCK_COUNT, "%s.posnet.block_count" }, + + { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" }, + { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" }, + + { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, + { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, + { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, + { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" }, + { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" }, + { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" }, + { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" }, + { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" }, + { LLM_KV_TOKENIZER_EOS_ID, 
"tokenizer.ggml.eos_token_id" }, + { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" }, + { LLM_KV_TOKENIZER_EOM_ID, "tokenizer.ggml.eom_token_id" }, + { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" }, + { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" }, + { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" }, + { LLM_KV_TOKENIZER_CLS_ID, "tokenizer.ggml.cls_token_id" }, + { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" }, + { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" }, + { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" }, + { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" }, + { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" }, + { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" }, + { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" }, + { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" }, + { LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" }, + { LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" }, + { LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" }, + { LLM_KV_TOKENIZER_FIM_PAD_ID, "tokenizer.ggml.fim_pad_token_id" }, + { LLM_KV_TOKENIZER_FIM_REP_ID, "tokenizer.ggml.fim_rep_token_id" }, + { LLM_KV_TOKENIZER_FIM_SEP_ID, "tokenizer.ggml.fim_sep_token_id" }, + + { LLM_KV_ADAPTER_TYPE, "adapter.type" }, + { LLM_KV_ADAPTER_LORA_ALPHA, "adapter.lora.alpha" }, + + // deprecated + { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" }, + { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" }, + { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" }, +}; + +static const std::map> LLM_TENSOR_NAMES = { + { + LLM_ARCH_LLAMA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_DECI, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { 
LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_BAICHUAN, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_FALCON, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_GROK, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + }, + }, + { + LLM_ARCH_GPT2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_GPTJ, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + }, + }, + { + LLM_ARCH_GPTNEOX, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_MPT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output"}, + { 
LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm"}, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm"}, + }, + }, + { + LLM_ARCH_STARCODER, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_REFACT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_CLS, "cls" }, + { LLM_TENSOR_CLS_OUT, "cls.output" }, + }, + }, + { + LLM_ARCH_NOMIC_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_JINA_BERT_V2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_CLS, "cls" }, + }, + }, + { + LLM_ARCH_BLOOM, + { + { 
LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_STABLELM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + }, + }, + { + LLM_ARCH_QWEN, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_QWEN2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_QWEN2VL, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_QWEN2MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { 
LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_PHI2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_PHI3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_PLAMO, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_CODESHELL, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_ORION, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_INTERNLM2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" 
}, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_MINICPM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + }, + }, + { + LLM_ARCH_MINICPM3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" }, + { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" }, + { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" }, + { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" }, + { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_GEMMA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_GEMMA2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + }, + }, + { + LLM_ARCH_STARCODER2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" 
}, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_MAMBA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + }, + }, + { + LLM_ARCH_XVERSE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_COMMAND_R, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + }, + }, + { + LLM_ARCH_DBRX, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_OLMO, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_OLMO2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { 
LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_OLMOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_OPENELM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_ARCTIC, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_DEEPSEEK, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { 
LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_DEEPSEEK2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" }, + { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" }, + { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" }, + { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" }, + { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_CHATGLM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_BITNET, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_SUB_NORM, "blk.%d.attn_sub_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_SUB_NORM, "blk.%d.ffn_sub_norm" }, + }, + }, + { + LLM_ARCH_T5, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_DEC_OUTPUT_NORM, "dec.output_norm" }, + { LLM_TENSOR_DEC_ATTN_NORM, "dec.blk.%d.attn_norm" }, + { LLM_TENSOR_DEC_ATTN_Q, "dec.blk.%d.attn_q" }, + { LLM_TENSOR_DEC_ATTN_K, "dec.blk.%d.attn_k" }, + { LLM_TENSOR_DEC_ATTN_V, "dec.blk.%d.attn_v" }, + { LLM_TENSOR_DEC_ATTN_OUT, "dec.blk.%d.attn_o" }, + { LLM_TENSOR_DEC_ATTN_REL_B, "dec.blk.%d.attn_rel_b" }, + { LLM_TENSOR_DEC_CROSS_ATTN_NORM, "dec.blk.%d.cross_attn_norm" }, + { LLM_TENSOR_DEC_CROSS_ATTN_Q, "dec.blk.%d.cross_attn_q" }, + { LLM_TENSOR_DEC_CROSS_ATTN_K, "dec.blk.%d.cross_attn_k" }, + { LLM_TENSOR_DEC_CROSS_ATTN_V, "dec.blk.%d.cross_attn_v" }, + { LLM_TENSOR_DEC_CROSS_ATTN_OUT, "dec.blk.%d.cross_attn_o" }, + { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" }, + { LLM_TENSOR_DEC_FFN_NORM, "dec.blk.%d.ffn_norm" }, + { LLM_TENSOR_DEC_FFN_GATE, "dec.blk.%d.ffn_gate" }, + { LLM_TENSOR_DEC_FFN_DOWN, "dec.blk.%d.ffn_down" }, + { LLM_TENSOR_DEC_FFN_UP, "dec.blk.%d.ffn_up" }, + 
{ LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" }, + { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" }, + { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" }, + { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" }, + { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" }, + { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" }, + { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" }, + { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" }, + { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" }, + { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_T5ENCODER, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" }, + { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" }, + { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" }, + { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" }, + { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" }, + { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" }, + { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" }, + { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" }, + { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" }, + { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_JAIS, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_NEMOTRON, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_EXAONE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_RWKV6, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" }, + { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" }, + { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" }, + { LLM_TENSOR_TIME_MIX_LERP_W, 
"blk.%d.time_mix_lerp_w" }, + { LLM_TENSOR_TIME_MIX_LERP_K, "blk.%d.time_mix_lerp_k" }, + { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" }, + { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" }, + { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" }, + { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" }, + { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" }, + { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" }, + { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" }, + { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" }, + { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" }, + { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" }, + { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" }, + { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" }, + { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" }, + { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" }, + { LLM_TENSOR_CHANNEL_MIX_LERP_R, "blk.%d.channel_mix_lerp_r" }, + { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" }, + { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" }, + { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" }, + }, + }, + { + LLM_ARCH_GRANITE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_GRANITE_MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_CHAMELEON, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + }, + }, + { + LLM_ARCH_WAVTOKENIZER_DEC, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_CONV1D, "conv1d" }, + { LLM_TENSOR_CONVNEXT_DW, "convnext.%d.dw" }, + { LLM_TENSOR_CONVNEXT_NORM, "convnext.%d.norm" }, + { LLM_TENSOR_CONVNEXT_PW1, "convnext.%d.pw1" }, + { LLM_TENSOR_CONVNEXT_PW2, "convnext.%d.pw2" }, + { LLM_TENSOR_CONVNEXT_GAMMA, "convnext.%d.gamma" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + 
{ LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_POS_NET_CONV1, "posnet.%d.conv1" },
+            { LLM_TENSOR_POS_NET_CONV2, "posnet.%d.conv2" },
+            { LLM_TENSOR_POS_NET_NORM, "posnet.%d.norm" },
+            { LLM_TENSOR_POS_NET_NORM1, "posnet.%d.norm1" },
+            { LLM_TENSOR_POS_NET_NORM2, "posnet.%d.norm2" },
+            { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
+            { LLM_TENSOR_POS_NET_ATTN_Q, "posnet.%d.attn_q" },
+            { LLM_TENSOR_POS_NET_ATTN_K, "posnet.%d.attn_k" },
+            { LLM_TENSOR_POS_NET_ATTN_V, "posnet.%d.attn_v" },
+            { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" },
+        },
+    },
+    {
+        LLM_ARCH_UNKNOWN,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+        },
+    },
+};
+
+static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
+    {LLM_TENSOR_TOKEN_EMBD, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+    {LLM_TENSOR_POS_EMBD, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+    {LLM_TENSOR_TOKEN_EMBD_NORM, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+    {LLM_TENSOR_TOKEN_TYPES, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
+    {LLM_TENSOR_OUTPUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_CLS, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_CLS_OUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+    {LLM_TENSOR_DEC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+    {LLM_TENSOR_ENC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
+    {LLM_TENSOR_ROPE_FREQS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+    {LLM_TENSOR_ROPE_FACTORS_LONG, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+    {LLM_TENSOR_ROPE_FACTORS_SHORT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
+    {LLM_TENSOR_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_QKV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_DOWN_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_GATE_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_UP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_Q_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_QKV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_DOWN_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_FFN_GATE_SHEXP, 
{LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_UP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE_INP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE_INP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_IN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_DT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_DECAY_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_DECAY_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_VALUE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_RECEPTANCE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_OUTPUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CHANNEL_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CHANNEL_MIX_VALUE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_ACT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}}, + {LLM_TENSOR_SSM_CONV1D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}}, + {LLM_TENSOR_SSM_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}}, + {LLM_TENSOR_SSM_D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_LERP_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + 
{LLM_TENSOR_TIME_MIX_LN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CHANNEL_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CHANNEL_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_LERP_W, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_G, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_DECAY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_FIRST, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}}, + {LLM_TENSOR_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_NORM_2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_OUT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_NORM_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_Q_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_K_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_LAYER_OUT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_Q_A_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_KV_A_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_SUB_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_SUB_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_CROSS_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ENC_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ENC_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_ENC_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_FFN_DOWN_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, + {LLM_TENSOR_FFN_GATE_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, + {LLM_TENSOR_FFN_UP_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, + // this tensor is loaded for T5, but never used + {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}}, + {LLM_TENSOR_CONV1D, {LLM_TENSOR_LAYER_INPUT, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_NORM1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_NORM2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_CONV1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_CONV2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_DW, {LLM_TENSOR_LAYER_REPEATING, 
GGML_OP_IM2COL}}, + {LLM_TENSOR_CONVNEXT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CONVNEXT_PW1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_PW2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, +}; + +LLM_KV::LLM_KV(llm_arch arch) : arch(arch) {} + +std::string LLM_KV::operator()(llm_kv kv) const { + return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch)); +} + +std::string LLM_TN_IMPL::str() const { + if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) { + return "__missing__"; + } + + std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid); + + if (suffix != nullptr) { + name += "."; + name += suffix; + } + + return name; +} + +const char * llm_arch_name(llm_arch arch) { + auto it = LLM_ARCH_NAMES.find(arch); + if (it == LLM_ARCH_NAMES.end()) { + return "unknown"; + } + return it->second; +} + +llm_arch llm_arch_from_string(const std::string & name) { + for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT + if (kv.second == name) { + return kv.first; + } + } + + return LLM_ARCH_UNKNOWN; +} + +const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) { + return LLM_TENSOR_INFOS.at(tensor); +} diff --git a/src/llama-arch.h b/src/llama-arch.h new file mode 100644 index 000000000..446e72eeb --- /dev/null +++ b/src/llama-arch.h @@ -0,0 +1,391 @@ +#pragma once + +#include "ggml.h" // ggml_op + +#include + +// +// gguf constants (sync with gguf.py) +// + +enum llm_arch { + LLM_ARCH_LLAMA, + LLM_ARCH_DECI, + LLM_ARCH_FALCON, + LLM_ARCH_BAICHUAN, + LLM_ARCH_GROK, + LLM_ARCH_GPT2, + LLM_ARCH_GPTJ, + LLM_ARCH_GPTNEOX, + LLM_ARCH_MPT, + LLM_ARCH_STARCODER, + LLM_ARCH_REFACT, + LLM_ARCH_BERT, + LLM_ARCH_NOMIC_BERT, + LLM_ARCH_JINA_BERT_V2, + LLM_ARCH_BLOOM, + LLM_ARCH_STABLELM, + LLM_ARCH_QWEN, + LLM_ARCH_QWEN2, + LLM_ARCH_QWEN2MOE, + LLM_ARCH_QWEN2VL, + LLM_ARCH_PHI2, + LLM_ARCH_PHI3, + LLM_ARCH_PLAMO, + LLM_ARCH_CODESHELL, + LLM_ARCH_ORION, + LLM_ARCH_INTERNLM2, + LLM_ARCH_MINICPM, + LLM_ARCH_MINICPM3, + LLM_ARCH_GEMMA, + LLM_ARCH_GEMMA2, + LLM_ARCH_STARCODER2, + LLM_ARCH_MAMBA, + LLM_ARCH_XVERSE, + LLM_ARCH_COMMAND_R, + LLM_ARCH_DBRX, + LLM_ARCH_OLMO, + LLM_ARCH_OLMO2, + LLM_ARCH_OLMOE, + LLM_ARCH_OPENELM, + LLM_ARCH_ARCTIC, + LLM_ARCH_DEEPSEEK, + LLM_ARCH_DEEPSEEK2, + LLM_ARCH_CHATGLM, + LLM_ARCH_BITNET, + LLM_ARCH_T5, + LLM_ARCH_T5ENCODER, + LLM_ARCH_JAIS, + LLM_ARCH_NEMOTRON, + LLM_ARCH_EXAONE, + LLM_ARCH_RWKV6, + LLM_ARCH_GRANITE, + LLM_ARCH_GRANITE_MOE, + LLM_ARCH_CHAMELEON, + LLM_ARCH_WAVTOKENIZER_DEC, + LLM_ARCH_UNKNOWN, +}; + +enum llm_kv { + LLM_KV_GENERAL_TYPE, + LLM_KV_GENERAL_ARCHITECTURE, + LLM_KV_GENERAL_QUANTIZATION_VERSION, + LLM_KV_GENERAL_ALIGNMENT, + LLM_KV_GENERAL_NAME, + LLM_KV_GENERAL_AUTHOR, + LLM_KV_GENERAL_VERSION, + LLM_KV_GENERAL_URL, + LLM_KV_GENERAL_DESCRIPTION, + LLM_KV_GENERAL_LICENSE, + LLM_KV_GENERAL_SOURCE_URL, + LLM_KV_GENERAL_SOURCE_HF_REPO, + + LLM_KV_VOCAB_SIZE, + LLM_KV_CONTEXT_LENGTH, + LLM_KV_EMBEDDING_LENGTH, + LLM_KV_FEATURES_LENGTH, + LLM_KV_BLOCK_COUNT, + LLM_KV_LEADING_DENSE_BLOCK_COUNT, + LLM_KV_FEED_FORWARD_LENGTH, + LLM_KV_EXPERT_FEED_FORWARD_LENGTH, + LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, + LLM_KV_USE_PARALLEL_RESIDUAL, + LLM_KV_TENSOR_DATA_LAYOUT, + LLM_KV_EXPERT_COUNT, + LLM_KV_EXPERT_USED_COUNT, + LLM_KV_EXPERT_SHARED_COUNT, + LLM_KV_EXPERT_WEIGHTS_SCALE, + LLM_KV_POOLING_TYPE, + LLM_KV_LOGIT_SCALE, + LLM_KV_DECODER_START_TOKEN_ID, + LLM_KV_ATTN_LOGIT_SOFTCAPPING, + 
LLM_KV_FINAL_LOGIT_SOFTCAPPING, + LLM_KV_SWIN_NORM, + LLM_KV_RESCALE_EVERY_N_LAYERS, + LLM_KV_TIME_MIX_EXTRA_DIM, + LLM_KV_TIME_DECAY_EXTRA_DIM, + LLM_KV_RESIDUAL_SCALE, + LLM_KV_EMBEDDING_SCALE, + + LLM_KV_ATTENTION_HEAD_COUNT, + LLM_KV_ATTENTION_HEAD_COUNT_KV, + LLM_KV_ATTENTION_MAX_ALIBI_BIAS, + LLM_KV_ATTENTION_CLAMP_KQV, + LLM_KV_ATTENTION_KEY_LENGTH, + LLM_KV_ATTENTION_VALUE_LENGTH, + LLM_KV_ATTENTION_LAYERNORM_EPS, + LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, + LLM_KV_ATTENTION_GROUPNORM_EPS, + LLM_KV_ATTENTION_GROUPNORM_GROUPS, + LLM_KV_ATTENTION_CAUSAL, + LLM_KV_ATTENTION_Q_LORA_RANK, + LLM_KV_ATTENTION_KV_LORA_RANK, + LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, + LLM_KV_ATTENTION_SLIDING_WINDOW, + LLM_KV_ATTENTION_SCALE, + + LLM_KV_ROPE_DIMENSION_COUNT, + LLM_KV_ROPE_DIMENSION_SECTIONS, + LLM_KV_ROPE_FREQ_BASE, + LLM_KV_ROPE_SCALE_LINEAR, + LLM_KV_ROPE_SCALING_TYPE, + LLM_KV_ROPE_SCALING_FACTOR, + LLM_KV_ROPE_SCALING_ATTN_FACTOR, + LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, + LLM_KV_ROPE_SCALING_FINETUNED, + LLM_KV_ROPE_SCALING_YARN_LOG_MUL, + + LLM_KV_SPLIT_NO, + LLM_KV_SPLIT_COUNT, + LLM_KV_SPLIT_TENSORS_COUNT, + + LLM_KV_SSM_INNER_SIZE, + LLM_KV_SSM_CONV_KERNEL, + LLM_KV_SSM_STATE_SIZE, + LLM_KV_SSM_TIME_STEP_RANK, + LLM_KV_SSM_DT_B_C_RMS, + + LLM_KV_WKV_HEAD_SIZE, + + LLM_KV_TOKENIZER_MODEL, + LLM_KV_TOKENIZER_PRE, + LLM_KV_TOKENIZER_LIST, + LLM_KV_TOKENIZER_TOKEN_TYPE, + LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, + LLM_KV_TOKENIZER_SCORES, + LLM_KV_TOKENIZER_MERGES, + LLM_KV_TOKENIZER_BOS_ID, + LLM_KV_TOKENIZER_EOS_ID, + LLM_KV_TOKENIZER_EOT_ID, + LLM_KV_TOKENIZER_EOM_ID, + LLM_KV_TOKENIZER_UNK_ID, + LLM_KV_TOKENIZER_SEP_ID, + LLM_KV_TOKENIZER_PAD_ID, + LLM_KV_TOKENIZER_CLS_ID, + LLM_KV_TOKENIZER_MASK_ID, + LLM_KV_TOKENIZER_ADD_BOS, + LLM_KV_TOKENIZER_ADD_EOS, + LLM_KV_TOKENIZER_ADD_PREFIX, + LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, + LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, + LLM_KV_TOKENIZER_HF_JSON, + LLM_KV_TOKENIZER_RWKV, + LLM_KV_TOKENIZER_FIM_PRE_ID, + LLM_KV_TOKENIZER_FIM_SUF_ID, + LLM_KV_TOKENIZER_FIM_MID_ID, + LLM_KV_TOKENIZER_FIM_PAD_ID, + LLM_KV_TOKENIZER_FIM_REP_ID, + LLM_KV_TOKENIZER_FIM_SEP_ID, + + LLM_KV_ADAPTER_TYPE, + LLM_KV_ADAPTER_LORA_ALPHA, + + LLM_KV_POSNET_EMBEDDING_LENGTH, + LLM_KV_POSNET_BLOCK_COUNT, + + LLM_KV_CONVNEXT_EMBEDDING_LENGTH, + LLM_KV_CONVNEXT_BLOCK_COUNT, + + // deprecated: + LLM_KV_TOKENIZER_PREFIX_ID, + LLM_KV_TOKENIZER_SUFFIX_ID, + LLM_KV_TOKENIZER_MIDDLE_ID, +}; + +enum llm_tensor { + LLM_TENSOR_TOKEN_EMBD, + LLM_TENSOR_TOKEN_EMBD_NORM, + LLM_TENSOR_TOKEN_TYPES, + LLM_TENSOR_POS_EMBD, + LLM_TENSOR_OUTPUT, + LLM_TENSOR_OUTPUT_NORM, + LLM_TENSOR_ROPE_FREQS, + LLM_TENSOR_ROPE_FACTORS_LONG, + LLM_TENSOR_ROPE_FACTORS_SHORT, + LLM_TENSOR_ATTN_Q, + LLM_TENSOR_ATTN_K, + LLM_TENSOR_ATTN_V, + LLM_TENSOR_ATTN_QKV, + LLM_TENSOR_ATTN_OUT, + LLM_TENSOR_ATTN_NORM, + LLM_TENSOR_ATTN_NORM_2, + LLM_TENSOR_ATTN_OUT_NORM, + LLM_TENSOR_ATTN_POST_NORM, + LLM_TENSOR_ATTN_ROT_EMBD, + LLM_TENSOR_FFN_GATE_INP, + LLM_TENSOR_FFN_GATE_INP_SHEXP, + LLM_TENSOR_FFN_NORM, + LLM_TENSOR_FFN_POST_NORM, + LLM_TENSOR_FFN_GATE, + LLM_TENSOR_FFN_DOWN, + LLM_TENSOR_FFN_UP, + LLM_TENSOR_FFN_ACT, + LLM_TENSOR_FFN_DOWN_EXP, // split experts for backward compatibility + LLM_TENSOR_FFN_GATE_EXP, + LLM_TENSOR_FFN_UP_EXP, + LLM_TENSOR_FFN_NORM_EXPS, + LLM_TENSOR_FFN_DOWN_EXPS, // merged experts + LLM_TENSOR_FFN_GATE_EXPS, + LLM_TENSOR_FFN_UP_EXPS, + LLM_TENSOR_FFN_DOWN_SHEXP, + LLM_TENSOR_FFN_GATE_SHEXP, + LLM_TENSOR_FFN_UP_SHEXP, + LLM_TENSOR_ATTN_Q_NORM, + LLM_TENSOR_ATTN_K_NORM, + LLM_TENSOR_LAYER_OUT_NORM, + 
LLM_TENSOR_SSM_IN, + LLM_TENSOR_SSM_CONV1D, + LLM_TENSOR_SSM_X, + LLM_TENSOR_SSM_DT, + LLM_TENSOR_SSM_A, + LLM_TENSOR_SSM_D, + LLM_TENSOR_SSM_OUT, + LLM_TENSOR_TIME_MIX_W1, + LLM_TENSOR_TIME_MIX_W2, + LLM_TENSOR_TIME_MIX_LERP_X, + LLM_TENSOR_TIME_MIX_LERP_W, + LLM_TENSOR_TIME_MIX_LERP_K, + LLM_TENSOR_TIME_MIX_LERP_V, + LLM_TENSOR_TIME_MIX_LERP_R, + LLM_TENSOR_TIME_MIX_LERP_G, + LLM_TENSOR_TIME_MIX_FIRST, + LLM_TENSOR_TIME_MIX_DECAY, + LLM_TENSOR_TIME_MIX_DECAY_W1, + LLM_TENSOR_TIME_MIX_DECAY_W2, + LLM_TENSOR_TIME_MIX_KEY, + LLM_TENSOR_TIME_MIX_VALUE, + LLM_TENSOR_TIME_MIX_RECEPTANCE, + LLM_TENSOR_TIME_MIX_GATE, + LLM_TENSOR_TIME_MIX_LN, + LLM_TENSOR_TIME_MIX_OUTPUT, + LLM_TENSOR_CHANNEL_MIX_LERP_K, + LLM_TENSOR_CHANNEL_MIX_LERP_R, + LLM_TENSOR_CHANNEL_MIX_KEY, + LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, + LLM_TENSOR_CHANNEL_MIX_VALUE, + LLM_TENSOR_ATTN_Q_A, + LLM_TENSOR_ATTN_Q_B, + LLM_TENSOR_ATTN_KV_A_MQA, + LLM_TENSOR_ATTN_KV_B, + LLM_TENSOR_ATTN_Q_A_NORM, + LLM_TENSOR_ATTN_KV_A_NORM, + LLM_TENSOR_ATTN_SUB_NORM, + LLM_TENSOR_FFN_SUB_NORM, + LLM_TENSOR_DEC_ATTN_NORM, + LLM_TENSOR_DEC_ATTN_Q, + LLM_TENSOR_DEC_ATTN_K, + LLM_TENSOR_DEC_ATTN_V, + LLM_TENSOR_DEC_ATTN_OUT, + LLM_TENSOR_DEC_ATTN_REL_B, + LLM_TENSOR_DEC_CROSS_ATTN_NORM, + LLM_TENSOR_DEC_CROSS_ATTN_Q, + LLM_TENSOR_DEC_CROSS_ATTN_K, + LLM_TENSOR_DEC_CROSS_ATTN_V, + LLM_TENSOR_DEC_CROSS_ATTN_OUT, + LLM_TENSOR_DEC_CROSS_ATTN_REL_B, + LLM_TENSOR_DEC_FFN_NORM, + LLM_TENSOR_DEC_FFN_GATE, + LLM_TENSOR_DEC_FFN_DOWN, + LLM_TENSOR_DEC_FFN_UP, + LLM_TENSOR_DEC_OUTPUT_NORM, + LLM_TENSOR_ENC_ATTN_NORM, + LLM_TENSOR_ENC_ATTN_Q, + LLM_TENSOR_ENC_ATTN_K, + LLM_TENSOR_ENC_ATTN_V, + LLM_TENSOR_ENC_ATTN_OUT, + LLM_TENSOR_ENC_ATTN_REL_B, + LLM_TENSOR_ENC_FFN_NORM, + LLM_TENSOR_ENC_FFN_GATE, + LLM_TENSOR_ENC_FFN_DOWN, + LLM_TENSOR_ENC_FFN_UP, + LLM_TENSOR_ENC_OUTPUT_NORM, + LLM_TENSOR_CLS, + LLM_TENSOR_CLS_OUT, + LLM_TENSOR_CONV1D, + LLM_TENSOR_CONVNEXT_DW, + LLM_TENSOR_CONVNEXT_NORM, + LLM_TENSOR_CONVNEXT_PW1, + LLM_TENSOR_CONVNEXT_PW2, + LLM_TENSOR_CONVNEXT_GAMMA, + LLM_TENSOR_POS_NET_CONV1, + LLM_TENSOR_POS_NET_CONV2, + LLM_TENSOR_POS_NET_NORM, + LLM_TENSOR_POS_NET_NORM1, + LLM_TENSOR_POS_NET_NORM2, + LLM_TENSOR_POS_NET_ATTN_NORM, + LLM_TENSOR_POS_NET_ATTN_Q, + LLM_TENSOR_POS_NET_ATTN_K, + LLM_TENSOR_POS_NET_ATTN_V, + LLM_TENSOR_POS_NET_ATTN_OUT, +}; + +enum llm_tensor_layer { + LLM_TENSOR_LAYER_INPUT, + LLM_TENSOR_LAYER_REPEATING, + LLM_TENSOR_LAYER_OUTPUT, +}; + +struct LLM_KV { + LLM_KV(llm_arch arch); + + llm_arch arch; + + std::string operator()(llm_kv kv) const; +}; + +// helper to handle gguf constants +// usage: +// +// const auto tn = LLM_TN(LLM_ARCH_LLAMA); +// +// std::string name = tn(LLM_TENSOR_OUTPUT); -> "output" +// std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias" +// std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight" +// +struct LLM_TN_IMPL { + const llm_arch arch; + const llm_tensor tensor; + const char * const suffix; + const int bid; + const int xid; + + std::string str() const; + + operator std::string() const { + return str(); + } + + friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) { + return str == tn.str(); + } + + friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) { + return str != tn.str(); + } +}; + +struct LLM_TN { + LLM_TN(llm_arch arch) : arch(arch) {} + + llm_arch arch; + + LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const { + return { arch, tensor, suffix, bid, xid }; + } + 
+ LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const { + return { arch, tensor, nullptr, bid, xid }; + } +}; + + +struct llm_tensor_info { + llm_tensor_layer layer; + ggml_op op; +}; + +const char * llm_arch_name(llm_arch arch); + +llm_arch llm_arch_from_string(const std::string & name); + +const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor); diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp new file mode 100644 index 000000000..01d5ca57f --- /dev/null +++ b/src/llama-batch.cpp @@ -0,0 +1,368 @@ +#include "llama-batch.h" + +#include +#include + +llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) { + // clear empty sequences + // the previous ubatch is assumed to be gone, + // so nothing should refer to values in these sequences anymore. + for (size_t i = seq.size(); i-- > 0;) { + if (seq[i].length == 0) { + seq.pop_back(); + } else { + break; + } + } + ubatch_token.resize(!has_embd ? n_ubatch : 0); + ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0); + ubatch_pos.resize(n_ubatch); + ubatch_n_seq_id.resize(n_ubatch); + ubatch_seq_id.resize(n_ubatch); + ubatch_output.resize(n_ubatch); + llama_ubatch ubatch = { + /*equal_seqs =*/ true, + /*n_tokens =*/ 0, + /*n_seq_tokens =*/ 0, + /*n_seqs =*/ 0, + /*token =*/ !has_embd ? ubatch_token.data() : nullptr, + /*embd =*/ has_embd ? ubatch_embd.data() : nullptr, + /*pos =*/ ubatch_pos.data(), + /*n_seq_id =*/ ubatch_n_seq_id.data(), + /*seq_id =*/ ubatch_seq_id.data(), + /*output =*/ ubatch_output.data(), + }; + return ubatch; +} + +void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) { + GGML_ASSERT(batch != nullptr); + GGML_ASSERT(length <= seq.length); + // Can only add sequences of equal lengths to a batch, + // otherwise it isn't clear to which sequence a token belongs + GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs); + GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs); + // NOTE: loops are separated for cache-friendliness + if (batch->token) { + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]]; + } + } else { + // simple split + ubatch.token = batch->token + seq.offset; + } + } else { + ubatch.token = nullptr; + } + if (batch->embd) { + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + memcpy( + ubatch.embd + (n_embd * (ubatch.n_tokens + i)), + batch->embd + (n_embd * ids[seq.offset + i]), + n_embd * sizeof(float) + ); + } + } else { + // simple split + ubatch.embd = batch->embd + (n_embd * seq.offset); + } + } else { + ubatch.embd = nullptr; + } + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]]; + } + } else { + // simple split + ubatch.pos = batch->pos + seq.offset; + } + if (ubatch.equal_seqs) { + ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id; + if (seq.seq_id) { + ubatch.seq_id[ubatch.n_seqs] = seq.seq_id; + } + } else { + // simple split + if (batch->n_seq_id) { + ubatch.n_seq_id = batch->n_seq_id + seq.offset; + } else { + for (size_t i = 0; i < length; ++i) { + ubatch.n_seq_id[ubatch.n_seqs + i] = 1; + } + } + if (batch->seq_id) { + ubatch.seq_id = batch->seq_id + seq.offset; + } + } + if (logits_all) { + for (size_t i = 0; i < length; ++i) { + ubatch.output[ubatch.n_tokens + i] = 1; + out_ids.push_back(ids[seq.offset + i]); + } + } else if (batch->logits) { + if 
(ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + size_t id = ids[seq.offset + i]; + int8_t is_output = batch->logits[id]; + ubatch.output[ubatch.n_tokens + i] = is_output; + if (is_output) { out_ids.push_back(id); } + } + } else { + // simple split + ubatch.output = batch->logits + seq.offset; + for (size_t i = 0; i < length; ++i) { + if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); } + } + } + } else { + // only get last output + for (size_t i = 0; i < length; ++i) { + size_t id = ids[seq.offset + i]; + int8_t is_last = id == ids.size() - 1; + ubatch.output[ubatch.n_tokens + i] = is_last; + if (is_last) { out_ids.push_back(id); } + } + } + if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) { + ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1; + } + ubatch.n_tokens += length; + ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits + seq.offset += length; + seq.length -= length; + n_tokens -= length; + GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs); +} + +llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) { + n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; + llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); + ubatch.equal_seqs = false; + if (!seq.empty()) { + llama_sbatch_seq & s = seq[0]; + size_t length = s.length < n_ubatch ? s.length : n_ubatch; + GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits + add_seq_to_ubatch(ubatch, s, length); + } + return ubatch; +} + +llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) { + n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; + llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); + if (!seq.empty()) { + size_t length = 0; + size_t n_tokens_in_ubatch = 0; + GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits + // smallest first, because it's easier to split this way; + // starting from the end to pop in constant time. + for (size_t i = seq.size(); i-- > 0;) { + llama_sbatch_seq & s = seq[i]; + GGML_ASSERT(s.length > 0); + if (length == 0) { + length = s.length < n_ubatch ? s.length : n_ubatch; + } + add_seq_to_ubatch(ubatch, s, length); + n_tokens_in_ubatch += length; + // shared prompts can't be mixed with any of their sequences, + // so it's safer to compute them in their own ubatch + if (s.n_seq_id > 1) { break; } + // stop when there isn't enough space for another sequence + if (length + n_tokens_in_ubatch > n_ubatch) { break; } + } + } + return ubatch; +} + +llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) { + n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; + llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); + if (!seq.empty()) { + llama_sbatch_seq & s = seq[seq.size() - 1]; + size_t length = s.length < n_ubatch ? 
s.length : n_ubatch; + GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits + add_seq_to_ubatch(ubatch, s, length); + } + return ubatch; +} + +void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) { + GGML_ASSERT(batch.n_tokens >= 0); + this->batch = &batch; + this->n_embd = n_embd; + this->logits_all = logits_all; + + n_tokens = batch.n_tokens; + ids.resize(n_tokens); + out_ids.clear(); + // TODO: reserve out_ids and seq + + for (size_t i = 0; i < n_tokens; ++i) { + ids[i] = i; + } + if (simple_split) { + seq.resize(1); + llama_sbatch_seq & s = seq[0]; + s.n_seq_id = 0; + s.seq_id = nullptr; + s.offset = 0; + s.length = n_tokens; + return; + } + std::sort(ids.begin(), ids.end(), + [&batch](size_t a, size_t b) { + int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1; + int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1; + // sort by seq_id, then by pos + if (n_seq_a == n_seq_b) { + if (batch.seq_id) { + for (int32_t i = 0; i < n_seq_a; ++i) { + llama_seq_id seq_id_a = batch.seq_id[a][i]; + llama_seq_id seq_id_b = batch.seq_id[b][i]; + // smaller seq_ids go first + if (seq_id_a != seq_id_b) { + return seq_id_a < seq_id_b; + } + } + } + // when all else is equal, sort by pos + if (batch.pos) { + return batch.pos[a] < batch.pos[b]; + } + // no pos, sort by id + return a < b; + } + // shared prompts go first + return n_seq_a > n_seq_b; + } + ); + // init seq + llama_sbatch_seq * last_seq = nullptr; + + for (size_t i = 0; i < n_tokens; ++i) { + const size_t bi = ids[i]; + const int32_t n_seqs = batch.n_seq_id[bi]; + llama_seq_id * seq_ids = batch.seq_id[bi]; + if (last_seq != nullptr) { + bool same = n_seqs == last_seq->n_seq_id; + for (int32_t j = 0; same && j < n_seqs; ++j) { + if (seq_ids[j] != last_seq->seq_id[j]) { + same = false; + } + } + if (same) { + last_seq->length += 1; + continue; + } + } + llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1}; + seq.push_back(new_seq); + last_seq = &seq.back(); + } + // keep shared prompts first at the end, then sort by length descending. 
+    std::sort(seq.begin(), seq.end(),
+        [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
+            if (a.n_seq_id == b.n_seq_id) {
+                return a.length > b.length;
+            }
+            return a.n_seq_id < b.n_seq_id;
+        }
+    );
+}
+
+llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0) {
+    batch = in_batch;
+    GGML_ASSERT(batch.n_tokens > 0);
+    if (!batch.pos) {
+        pos.resize(batch.n_tokens);
+        for (int32_t i = 0; i < batch.n_tokens; i++) {
+            pos[i] = i + p0;
+        }
+        batch.pos = pos.data();
+    }
+    if (!batch.n_seq_id) {
+        n_seq_id.resize(batch.n_tokens);
+        for (int32_t i = 0; i < batch.n_tokens; i++) {
+            n_seq_id[i] = seq_id_0.size();
+        }
+        batch.n_seq_id = n_seq_id.data();
+    }
+    if (!batch.seq_id) {
+        seq_id.resize(batch.n_tokens + 1);
+        seq_id[batch.n_tokens] = NULL;
+        for (int32_t i = 0; i < batch.n_tokens; i++) {
+            seq_id[i] = seq_id_0.data();
+        }
+        batch.seq_id = seq_id.data();
+    }
+    if (!batch.logits) {
+        logits.resize(batch.n_tokens);
+        logits[logits.size() - 1] = true;
+        batch.logits = logits.data();
+    }
+}
+
+//
+// interface implementation
+//
+
+struct llama_batch llama_batch_get_one(
+    llama_token * tokens,
+    int32_t n_tokens) {
+    return {
+        /*n_tokens =*/ n_tokens,
+        /*tokens =*/ tokens,
+        /*embd =*/ nullptr,
+        /*pos =*/ nullptr,
+        /*n_seq_id =*/ nullptr,
+        /*seq_id =*/ nullptr,
+        /*logits =*/ nullptr,
+    };
+}
+
+struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
+    llama_batch batch = {
+        /*n_tokens =*/ 0,
+        /*tokens =*/ nullptr,
+        /*embd =*/ nullptr,
+        /*pos =*/ nullptr,
+        /*n_seq_id =*/ nullptr,
+        /*seq_id =*/ nullptr,
+        /*logits =*/ nullptr,
+    };
+
+    if (embd) {
+        batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
+    } else {
+        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
+    }
+
+    batch.pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens_alloc);
+    batch.n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens_alloc);
+    batch.seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
+    for (int i = 0; i < n_tokens_alloc; ++i) {
+        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
+    }
+    batch.seq_id[n_tokens_alloc] = nullptr;
+
+    batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);
+
+    return batch;
+}
+
+void llama_batch_free(struct llama_batch batch) {
+    if (batch.token) free(batch.token);
+    if (batch.embd) free(batch.embd);
+    if (batch.pos) free(batch.pos);
+    if (batch.n_seq_id) free(batch.n_seq_id);
+    if (batch.seq_id) {
+        for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
+            free(batch.seq_id[i]);
+        }
+        free(batch.seq_id);
+    }
+    if (batch.logits) free(batch.logits);
+}
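The two helpers above, llama_batch_init and llama_batch_free, are the public entry points for building a batch by hand. A minimal usage sketch for orientation (not part of the patch itself; it assumes llama.h is on the include path and the token ids are placeholders):

#include "llama.h"

#include <cstdio>

int main() {
    const int32_t n_tokens = 4;

    // token-based batch (embd == 0), room for one sequence id per token
    llama_batch batch = llama_batch_init(/*n_tokens_alloc =*/ n_tokens, /*embd =*/ 0, /*n_seq_max =*/ 1);

    for (int32_t i = 0; i < n_tokens; ++i) {
        batch.token[i]     = 1 + i;               // placeholder token ids
        batch.pos[i]       = i;                   // positions within the sequence
        batch.n_seq_id[i]  = 1;
        batch.seq_id[i][0] = 0;                   // every token belongs to sequence 0
        batch.logits[i]    = (i == n_tokens - 1); // request output only for the last token
    }
    batch.n_tokens = n_tokens;

    printf("prepared a batch of %d tokens\n", batch.n_tokens);

    // llama_batch_free() walks seq_id until the nullptr sentinel set by llama_batch_init()
    llama_batch_free(batch);
    return 0;
}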
+ + uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs) + uint32_t n_seq_tokens; // tokens per sequence + uint32_t n_seqs; + + llama_token * token; // [n_tokens] + float * embd; // [n_embd, n_tokens] + llama_pos * pos; // [n_tokens] + int32_t * n_seq_id; // [n_seqs] + llama_seq_id ** seq_id; // [n_seqs] + int8_t * output; // [n_tokens] +}; + +struct llama_sbatch_seq { + int32_t n_seq_id; + + llama_seq_id * seq_id; + + size_t offset; + size_t length; +}; + +// sequence-length-aware batch splitting +struct llama_sbatch { + // tokens left in this batch + size_t n_tokens; + + size_t n_embd; + + bool logits_all; // TODO: remove once lctx.logits_all is removed too + + // sorted indices into the batch + std::vector ids; + // batch indices of the output + std::vector out_ids; + std::vector seq; + + const llama_batch * batch = nullptr; + + // buffers for the ubatch + std::vector ubatch_token; + std::vector ubatch_embd; + std::vector ubatch_pos; + std::vector ubatch_n_seq_id; + std::vector ubatch_seq_id; + std::vector ubatch_output; + + llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false); + + void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length); + + // simple split, unknown number of sequences of unequal lengths + llama_ubatch split_simple(size_t n_ubatch); + + // make batches of equal-length sequences + llama_ubatch split_equal(size_t n_ubatch); + + // sequence-wise split + llama_ubatch split_seq(size_t n_ubatch); + + void from_batch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false); +}; + +// temporary allocate memory for the input batch if needed +struct llama_batch_allocr { + struct llama_batch batch; + + std::array seq_id_0 = { 0 }; // default sequence id + std::vector pos; + std::vector n_seq_id; + std::vector seq_id; + std::vector logits; + + // optionally fulfill the batch returned by llama_batch_get_one + llama_batch_allocr(struct llama_batch in_batch, llama_pos p0); +}; diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp new file mode 100644 index 000000000..a07e9cf00 --- /dev/null +++ b/src/llama-chat.cpp @@ -0,0 +1,549 @@ +#include "llama-chat.h" + +#include "llama.h" + +#include +#include + +#if __cplusplus >= 202000L + #define LU8(x) (const char*)(u8##x) +#else + #define LU8(x) u8##x +#endif + +// trim whitespace from the beginning and end of a string +static std::string trim(const std::string & str) { + size_t start = 0; + size_t end = str.size(); + while (start < end && isspace(str[start])) { + start += 1; + } + while (end > start && isspace(str[end - 1])) { + end -= 1; + } + return str.substr(start, end - start); +} + +static const std::map LLM_CHAT_TEMPLATES = { + { "chatml", LLM_CHAT_TEMPLATE_CHATML }, + { "llama2", LLM_CHAT_TEMPLATE_LLAMA_2 }, + { "llama2-sys", LLM_CHAT_TEMPLATE_LLAMA_2_SYS }, + { "llama2-sys-bos", LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS }, + { "llama2-sys-strip", LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP }, + { "mistral-v1", LLM_CHAT_TEMPLATE_MISTRAL_V1 }, + { "mistral-v3", LLM_CHAT_TEMPLATE_MISTRAL_V3 }, + { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, + { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, + { "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, + { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 }, + { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, + { "monarch", LLM_CHAT_TEMPLATE_MONARCH }, + { "gemma", LLM_CHAT_TEMPLATE_GEMMA }, + { "orion", LLM_CHAT_TEMPLATE_ORION }, + { "openchat", LLM_CHAT_TEMPLATE_OPENCHAT }, + { "vicuna", LLM_CHAT_TEMPLATE_VICUNA }, + { "vicuna-orca", 
LLM_CHAT_TEMPLATE_VICUNA_ORCA }, + { "deepseek", LLM_CHAT_TEMPLATE_DEEPSEEK }, + { "deepseek2", LLM_CHAT_TEMPLATE_DEEPSEEK_2 }, + { "command-r", LLM_CHAT_TEMPLATE_COMMAND_R }, + { "llama3", LLM_CHAT_TEMPLATE_LLAMA_3 }, + { "chatglm3", LLM_CHAT_TEMPLATE_CHATGML_3 }, + { "chatglm4", LLM_CHAT_TEMPLATE_CHATGML_4 }, + { "minicpm", LLM_CHAT_TEMPLATE_MINICPM }, + { "exaone3", LLM_CHAT_TEMPLATE_EXAONE_3 }, + { "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD }, + { "granite", LLM_CHAT_TEMPLATE_GRANITE }, + { "gigachat", LLM_CHAT_TEMPLATE_GIGACHAT }, + { "megrez", LLM_CHAT_TEMPLATE_MEGREZ }, +}; + +llm_chat_template llm_chat_template_from_str(const std::string & name) { + return LLM_CHAT_TEMPLATES.at(name); +} + +llm_chat_template llm_chat_detect_template(const std::string & tmpl) { + try { + return llm_chat_template_from_str(tmpl); + } catch (const std::out_of_range &) { + // ignore + } + + auto tmpl_contains = [&tmpl](const char * haystack) -> bool { + return tmpl.find(haystack) != std::string::npos; + }; + if (tmpl_contains("<|im_start|>")) { + return LLM_CHAT_TEMPLATE_CHATML; + } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) { + if (tmpl_contains("[SYSTEM_PROMPT]")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V7; + } else if ( + // catches official 'v1' template + tmpl_contains("' [INST] ' + system_message") + // catches official 'v3' and 'v3-tekken' templates + || tmpl_contains("[AVAILABLE_TOOLS]") + ) { + // Official mistral 'v1', 'v3' and 'v3-tekken' templates + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md + if (tmpl_contains(" [INST]")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V1; + } else if (tmpl_contains("\"[INST]\"")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN; + } + return LLM_CHAT_TEMPLATE_MISTRAL_V3; + } else { + // llama2 template and its variants + // [variant] support system message + // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + bool support_system_message = tmpl_contains("<>"); + bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]"); + bool strip_message = tmpl_contains("content.strip()"); + if (strip_message) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP; + } else if (add_bos_inside_history) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS; + } else if (support_system_message) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS; + } else { + return LLM_CHAT_TEMPLATE_LLAMA_2; + } + } + } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { + return LLM_CHAT_TEMPLATE_PHI_3; + } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { + return LLM_CHAT_TEMPLATE_FALCON_3; + } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) { + return LLM_CHAT_TEMPLATE_ZEPHYR; + } else if (tmpl_contains("bos_token + message['role']")) { + return LLM_CHAT_TEMPLATE_MONARCH; + } else if (tmpl_contains("")) { + return LLM_CHAT_TEMPLATE_GEMMA; + } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) { + // OrionStarAI/Orion-14B-Chat + return LLM_CHAT_TEMPLATE_ORION; + } else if (tmpl_contains("GPT4 Correct ")) { + // openchat/openchat-3.5-0106 + return LLM_CHAT_TEMPLATE_OPENCHAT; + } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) { + // eachadea/vicuna-13b-1.1 (and Orca variant) + if (tmpl_contains("SYSTEM: ")) { + return LLM_CHAT_TEMPLATE_VICUNA_ORCA; + } + return LLM_CHAT_TEMPLATE_VICUNA; + } else if (tmpl_contains("### Instruction:") && 
tmpl_contains("<|EOT|>")) { + // deepseek-ai/deepseek-coder-33b-instruct + return LLM_CHAT_TEMPLATE_DEEPSEEK; + } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) { + // CohereForAI/c4ai-command-r-plus + return LLM_CHAT_TEMPLATE_COMMAND_R; + } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) { + return LLM_CHAT_TEMPLATE_LLAMA_3; + } else if (tmpl_contains("[gMASK]sop")) { + // chatglm3-6b + return LLM_CHAT_TEMPLATE_CHATGML_3; + } else if (tmpl_contains("[gMASK]")) { + return LLM_CHAT_TEMPLATE_CHATGML_4; + } else if (tmpl_contains(LU8("<用户>"))) { + // MiniCPM-3B-OpenHermes-2.5-v2-GGUF + return LLM_CHAT_TEMPLATE_MINICPM; + } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) { + return LLM_CHAT_TEMPLATE_DEEPSEEK_2; + } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) { + // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb + // EXAONE-3.0-7.8B-Instruct + return LLM_CHAT_TEMPLATE_EXAONE_3; + } else if (tmpl_contains("rwkv-world")) { + return LLM_CHAT_TEMPLATE_RWKV_WORLD; + } else if (tmpl_contains("<|start_of_role|>")) { + return LLM_CHAT_TEMPLATE_GRANITE; + } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) { + return LLM_CHAT_TEMPLATE_GIGACHAT; + } else if (tmpl_contains("<|role_start|>")) { + return LLM_CHAT_TEMPLATE_MEGREZ; + } + return LLM_CHAT_TEMPLATE_UNKNOWN; +} + +// Simple version of "llama_apply_chat_template" that only works with strings +// This function uses heuristic checks to determine commonly used template. It is not a jinja parser. +int32_t llm_chat_apply_template( + llm_chat_template tmpl, + const std::vector & chat, + std::string & dest, bool add_ass) { + // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527 + std::stringstream ss; + if (tmpl == LLM_CHAT_TEMPLATE_CHATML) { + // chatml template + for (auto message : chat) { + ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n"; + } + if (add_ass) { + ss << "<|im_start|>assistant\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) { + // Official mistral 'v7' template + // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7 + for (auto message : chat) { + std::string role(message->role); + std::string content(message->content); + if (role == "system") { + ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]"; + } else if (role == "user") { + ss << "[INST] " << content << "[/INST]"; + } + else { + ss << " " << content << ""; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 + || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3 + || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) { + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md + std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : ""; + std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? 
"" : " "; + bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3; + bool is_inside_turn = false; + for (auto message : chat) { + if (!is_inside_turn) { + ss << leading_space << "[INST]" << trailing_space; + is_inside_turn = true; + } + std::string role(message->role); + std::string content(message->content); + if (role == "system") { + ss << content << "\n\n"; + } else if (role == "user") { + ss << content << leading_space << "[/INST]"; + } else { + ss << trailing_space << (trim_assistant_message ? trim(content) : content) << ""; + is_inside_turn = false; + } + } + } else if ( + tmpl == LLM_CHAT_TEMPLATE_LLAMA_2 + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) { + // llama2 template and its variants + // [variant] support system message + // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2; + // [variant] add BOS inside history + bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS; + // [variant] trim spaces from the input message + bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP; + // construct the prompt + bool is_inside_turn = true; // skip BOS at the beginning + ss << "[INST] "; + for (auto message : chat) { + std::string content = strip_message ? trim(message->content) : message->content; + std::string role(message->role); + if (!is_inside_turn) { + is_inside_turn = true; + ss << (add_bos_inside_history ? "[INST] " : "[INST] "); + } + if (role == "system") { + if (support_system_message) { + ss << "<>\n" << content << "\n<>\n\n"; + } else { + // if the model does not support system message, we still include it in the first message, but without <> + ss << content << "\n"; + } + } else if (role == "user") { + ss << content << " [/INST]"; + } else { + ss << content << ""; + is_inside_turn = false; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) { + // Phi 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>\n" << message->content << "<|end|>\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) { + // Falcon 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>\n" << message->content << "\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) { + // zephyr template + for (auto message : chat) { + ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) { + // mlabonne/AlphaMonarch-7B template (the is included inside history) + for (auto message : chat) { + std::string bos = (message == chat.front()) ? "" : ""; // skip BOS for first message + ss << bos << message->role << "\n" << message->content << "\n"; + } + if (add_ass) { + ss << "assistant\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) { + // google/gemma-7b-it + std::string system_prompt = ""; + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken + system_prompt = trim(message->content); + continue; + } + // in gemma, "assistant" is "model" + role = role == "assistant" ? 
"model" : message->role; + ss << "" << role << "\n"; + if (!system_prompt.empty() && role != "model") { + ss << system_prompt << "\n\n"; + system_prompt = ""; + } + ss << trim(message->content) << "\n"; + } + if (add_ass) { + ss << "model\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) { + // OrionStarAI/Orion-14B-Chat + std::string system_prompt = ""; + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + // there is no system message support, we will merge it with user prompt + system_prompt = message->content; + continue; + } else if (role == "user") { + ss << "Human: "; + if (!system_prompt.empty()) { + ss << system_prompt << "\n\n"; + system_prompt = ""; + } + ss << message->content << "\n\nAssistant: "; + } else { + ss << message->content << ""; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) { + // openchat/openchat-3.5-0106, + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << message->content << "<|end_of_turn|>"; + } else { + role[0] = toupper(role[0]); + ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>"; + } + } + if (add_ass) { + ss << "GPT4 Correct Assistant:"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) { + // eachadea/vicuna-13b-1.1 (and Orca variant) + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + // Orca-Vicuna variant uses a system prefix + if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) { + ss << "SYSTEM: " << message->content << "\n"; + } else { + ss << message->content << "\n\n"; + } + } else if (role == "user") { + ss << "USER: " << message->content << "\n"; + } else if (role == "assistant") { + ss << "ASSISTANT: " << message->content << "\n"; + } + } + if (add_ass) { + ss << "ASSISTANT:"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) { + // deepseek-ai/deepseek-coder-33b-instruct + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << message->content; + } else if (role == "user") { + ss << "### Instruction:\n" << message->content << "\n"; + } else if (role == "assistant") { + ss << "### Response:\n" << message->content << "\n<|EOT|>\n"; + } + } + if (add_ass) { + ss << "### Response:\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) { + // CohereForAI/c4ai-command-r-plus + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>"; + } else if (role == "user") { + ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>"; + } else if (role == "assistant") { + ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>"; + } + } + if (add_ass) { + ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) { + // Llama 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>"; + } + if (add_ass) { + ss << "<|start_header_id|>assistant<|end_header_id|>\n\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) { + // chatglm3-6b + ss << "[gMASK]" << "sop"; + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>" << "\n " << message->content; + } + if (add_ass) { + ss << "<|assistant|>"; + } + } 
else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) { + ss << "[gMASK]" << ""; + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>" << "\n" << message->content; + } + if (add_ass) { + ss << "<|assistant|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) { + // MiniCPM-3B-OpenHermes-2.5-v2-GGUF + for (auto message : chat) { + std::string role(message->role); + if (role == "user") { + ss << LU8("<用户>"); + ss << trim(message->content); + ss << ""; + } else { + ss << trim(message->content); + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) { + // DeepSeek-V2 + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << message->content << "\n\n"; + } else if (role == "user") { + ss << "User: " << message->content << "\n\n"; + } else if (role == "assistant") { + ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>"); + } + } + if (add_ass) { + ss << "Assistant:"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) { + // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb + // EXAONE-3.0-7.8B-Instruct + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n"; + } else if (role == "user") { + ss << "[|user|]" << trim(message->content) << "\n"; + } else if (role == "assistant") { + ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n"; + } + } + if (add_ass) { + ss << "[|assistant|]"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) { + // this template requires the model to have "\n\n" as EOT token + for (auto message : chat) { + std::string role(message->role); + if (role == "user") { + ss << "User: " << message->content << "\n\nAssistant:"; + } else { + ss << message->content << "\n\n"; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) { + // IBM Granite template + for (const auto & message : chat) { + std::string role(message->role); + ss << "<|start_of_role|>" << role << "<|end_of_role|>"; + if (role == "assistant_tool_call") { + ss << "<|tool_call|>"; + } + ss << message->content << "<|end_of_text|>\n"; + } + if (add_ass) { + ss << "<|start_of_role|>assistant<|end_of_role|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) { + // GigaChat template + bool has_system = !chat.empty() && std::string(chat[0]->role) == "system"; + + // Handle system message if present + if (has_system) { + ss << "" << chat[0]->content << "<|message_sep|>"; + } else { + ss << ""; + } + + // Process remaining messages + for (size_t i = has_system ? 
1 : 0; i < chat.size(); i++) { + std::string role(chat[i]->role); + if (role == "user") { + ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>" + << "available functions<|role_sep|>[]<|message_sep|>"; + } else if (role == "assistant") { + ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>"; + } + } + + // Add generation prompt if needed + if (add_ass) { + ss << "assistant<|role_sep|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) { + // Megrez template + for (auto message : chat) { + std::string role(message->role); + ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>"; + } + + if (add_ass) { + ss << "<|role_start|>assistant<|role_end|>"; + } + } else { + // template not supported + return -1; + } + dest = ss.str(); + return dest.size(); +} + +// public interface + +int32_t llama_chat_builtin_templates(const char ** output, size_t len) { + auto it = LLM_CHAT_TEMPLATES.begin(); + for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) { + output[i] = it->first.c_str(); + std::advance(it, 1); + } + return (int32_t) LLM_CHAT_TEMPLATES.size(); +} + diff --git a/src/llama-chat.h b/src/llama-chat.h new file mode 100644 index 000000000..364318c27 --- /dev/null +++ b/src/llama-chat.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +enum llm_chat_template { + LLM_CHAT_TEMPLATE_CHATML, + LLM_CHAT_TEMPLATE_LLAMA_2, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP, + LLM_CHAT_TEMPLATE_MISTRAL_V1, + LLM_CHAT_TEMPLATE_MISTRAL_V3, + LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, + LLM_CHAT_TEMPLATE_MISTRAL_V7, + LLM_CHAT_TEMPLATE_PHI_3, + LLM_CHAT_TEMPLATE_FALCON_3, + LLM_CHAT_TEMPLATE_ZEPHYR, + LLM_CHAT_TEMPLATE_MONARCH, + LLM_CHAT_TEMPLATE_GEMMA, + LLM_CHAT_TEMPLATE_ORION, + LLM_CHAT_TEMPLATE_OPENCHAT, + LLM_CHAT_TEMPLATE_VICUNA, + LLM_CHAT_TEMPLATE_VICUNA_ORCA, + LLM_CHAT_TEMPLATE_DEEPSEEK, + LLM_CHAT_TEMPLATE_DEEPSEEK_2, + LLM_CHAT_TEMPLATE_COMMAND_R, + LLM_CHAT_TEMPLATE_LLAMA_3, + LLM_CHAT_TEMPLATE_CHATGML_3, + LLM_CHAT_TEMPLATE_CHATGML_4, + LLM_CHAT_TEMPLATE_MINICPM, + LLM_CHAT_TEMPLATE_EXAONE_3, + LLM_CHAT_TEMPLATE_RWKV_WORLD, + LLM_CHAT_TEMPLATE_GRANITE, + LLM_CHAT_TEMPLATE_GIGACHAT, + LLM_CHAT_TEMPLATE_MEGREZ, + LLM_CHAT_TEMPLATE_UNKNOWN, +}; + +struct llama_chat_message; + +llm_chat_template llm_chat_template_from_str(const std::string & name); + +llm_chat_template llm_chat_detect_template(const std::string & tmpl); + +int32_t llm_chat_apply_template( + llm_chat_template tmpl, + const std::vector & chat, + std::string & dest, bool add_ass); diff --git a/src/llama-context.cpp b/src/llama-context.cpp new file mode 100644 index 000000000..38a55fb2c --- /dev/null +++ b/src/llama-context.cpp @@ -0,0 +1,1771 @@ +#include "llama-context.h" + +#include +#include +#include +#include + +void llama_set_k_shift(struct llama_context & lctx) { + const int64_t kv_size = lctx.kv_self.size; + + assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer)); + + int32_t * data = (int32_t *) lctx.inp_K_shift->data; + + for (int i = 0; i < kv_size; ++i) { + data[i] = lctx.kv_self.cells[i].delta; + } +} + +void llama_set_s_copy(struct llama_context & lctx) { + const int64_t kv_size = lctx.kv_self.size; + + assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer)); + + int32_t * data = (int32_t *) lctx.inp_s_copy->data; + + for (int i = 0; i < kv_size; ++i) { + data[i] = lctx.kv_self.cells[i].src; + } +} + +// llama input + +static int32_t 
llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { + // TODO move to hparams if a T5 variant appears that uses a different value + const int64_t max_distance = 128; + + if (bidirectional) { + n_buckets >>= 1; + } + + const int64_t max_exact = n_buckets >> 1; + + int32_t relative_position = x - y; + int32_t relative_bucket = 0; + if (bidirectional) { + relative_bucket += (relative_position > 0) * n_buckets; + relative_position = abs(relative_position); + } else { + relative_position = -std::min(relative_position, 0); + } + int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); + relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1); + relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large); + return relative_bucket; +} + +void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) { + // + // set input data + // + + const auto & hparams = lctx.model.hparams; + const auto & cparams = lctx.cparams; + const auto & kv_self = lctx.kv_self; + + if (ubatch.token) { + const int64_t n_tokens = ubatch.n_tokens; + + ggml_backend_tensor_set(lctx.inp_tokens, ubatch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens)); + } + + if (ubatch.embd) { + const int64_t n_embd = hparams.n_embd; + const int64_t n_tokens = ubatch.n_tokens; + + ggml_backend_tensor_set(lctx.inp_embd, ubatch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd)); + } + + if (ubatch.pos && lctx.inp_pos) { + const int64_t n_tokens = ubatch.n_tokens; + auto n_pos = lctx.n_pos_per_token; + ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*n_pos*ggml_element_size(lctx.inp_pos)); + } + + if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { + //GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs"); + + if (!lctx.inp_out_ids) { + LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__); + } else { + const int64_t n_tokens = ubatch.n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer)); + int32_t * data = (int32_t *) lctx.inp_out_ids->data; + + if (lctx.n_outputs == n_tokens) { + for (int i = 0; i < n_tokens; ++i) { + data[i] = i; + } + } else if (ubatch.output) { + int32_t n_outputs = 0; + for (int i = 0; i < n_tokens; ++i) { + if (ubatch.output[i]) { + data[n_outputs++] = i; + } + } + // the graph needs to have been passed the correct number of outputs + GGML_ASSERT(lctx.n_outputs == n_outputs); + } else if (lctx.n_outputs == 1) { + // only keep last output + data[0] = n_tokens - 1; + } else { + GGML_ASSERT(lctx.n_outputs == 0); + } + } + } + + GGML_ASSERT( + // (!a || b) is a logical implication (a -> b) + // !hparams.causal_attn -> !cparams.causal_attn + (hparams.causal_attn || !cparams.causal_attn) && + "causal attention is not supported by this model" + ); + + if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) { + // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache. 
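A minimal standalone sketch (not part of the patch) of the per-cell masking rule applied by the causal branch below, simplified to KV cells with a single sequence id: cells belonging to another sequence or to a future position are masked with -INFINITY, everything else gets 0.0f, or -|Δpos| when ALiBi is used. The toy_cell type and the values are hypothetical.

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>

struct toy_cell { int pos; int seq_id; }; // simplified KV cell (single sequence id)

// same rule as the causal branch below, for one query token at (q_pos, q_seq)
static float mask_value(const toy_cell & cell, int q_pos, int q_seq, bool use_alibi) {
    if (cell.seq_id != q_seq || cell.pos > q_pos) {
        return -INFINITY;                                   // not visible to this query token
    }
    return use_alibi ? -(float) std::abs(cell.pos - q_pos) : 0.0f;
}

int main() {
    std::vector<toy_cell> kv = { {0, 0}, {1, 0}, {2, 0}, {0, 1} };
    for (const auto & c : kv) {
        printf("% .1f ", mask_value(c, /*q_pos=*/1, /*q_seq=*/0, /*use_alibi=*/false));
    }
    printf("\n"); // 0.0  0.0 -inf -inf
}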
+ if (cparams.causal_attn && !lctx.is_encoding) { + const int64_t n_kv = kv_self.n; + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + + float * data = nullptr; + float * data_swa = nullptr; + + if (lctx.inp_KQ_mask) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer)); + data = (float *) lctx.inp_KQ_mask->data; + } + + if (lctx.inp_KQ_mask_swa) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer)); + data_swa = (float *) lctx.inp_KQ_mask_swa->data; + } + + // For causal attention, use only the previous KV cells + // of the correct sequence for each token of the ubatch. + // It's assumed that if a token in the batch has multiple sequences, they are equivalent. + for (int h = 0; h < 1; ++h) { + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const llama_pos pos = ubatch.pos[s*n_seq_tokens + j]; + + for (int i = 0; i < n_kv; ++i) { + float f; + if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { + f = -INFINITY; + } else { + if (hparams.use_alibi) { + f = -std::abs(kv_self.cells[i].pos - pos); + } else { + f = 0.0f; + } + } + + if (data) { + data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + } + + // may need to cut off old tokens for sliding window + if (data_swa) { + if (pos - kv_self.cells[i].pos >= (int32_t)hparams.n_swa) { + f = -INFINITY; + } + data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + } + } + } + } + + if (data) { + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } + } + + if (data_swa) { + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } + } + } + } else { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + // when using kv cache, the mask needs to match the kv cache size + const int64_t n_stride = hparams.causal_attn && !lctx.is_encoding ? 
kv_self.n : n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer)); + + float * data = (float *) lctx.inp_KQ_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int s1 = 0; s1 < n_seqs; ++s1) { + const llama_seq_id seq_id = ubatch.seq_id[s1][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const int32_t tj = s1*n_seq_tokens + j; + + for (int s0 = 0; s0 < n_seqs; ++s0) { + for (int i = 0; i < n_seq_tokens; ++i) { + const int32_t ti = s0*n_seq_tokens + i; + float f = -INFINITY; + + for (int s = 0; s < ubatch.n_seq_id[s0]; ++s) { + if (ubatch.seq_id[s0][s] == seq_id) { + if (hparams.use_alibi) { + f = -std::abs(ubatch.pos[ti] - ubatch.pos[tj]); + } else { + f = 0.0f; + } + break; + } + } + + data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; + } + } + + for (int i = n_tokens; i < n_stride; ++i) { + data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; + } + } + } + } + } + } + + if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + GGML_ASSERT(lctx.inp_mean); + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); + + float * data = (float *) lctx.inp_mean->data; + memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean)); + + std::vector sum(n_tokens, 0); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); + + sum[seq_id] += ubatch.n_seq_tokens; + } + + std::vector div(n_tokens, 0.0f); + for (int i = 0; i < n_tokens; ++i) { + const uint64_t s = sum[i]; + if (s > 0) { + div[i] = 1.0f/float(s); + } + } + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + for (int i = 0; i < n_seq_tokens; ++i) { + data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; + } + } + } + + if (cparams.embeddings && ( + cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || + cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + GGML_ASSERT(lctx.inp_cls); + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + + uint32_t * data = (uint32_t *) lctx.inp_cls->data; + memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK"); + + for (int i = 0; i < n_seq_tokens; ++i) { + const llama_pos pos = ubatch.pos[s*n_seq_tokens + i]; + + if (pos == 0) { + data[seq_id] = s*n_seq_tokens + i; + } + } + } + } + + if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + GGML_ASSERT(lctx.inp_cls); + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + + uint32_t * data = (uint32_t *) lctx.inp_cls->data; + memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); + + std::vector last_pos(n_tokens, -1); + std::vector last_row(n_tokens, -1); + + for 
(int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST"); + + for (int i = 0; i < n_seq_tokens; ++i) { + const llama_pos pos = ubatch.pos[s*n_seq_tokens + i]; + + if (pos >= last_pos[seq_id]) { + last_pos[seq_id] = pos; + last_row[seq_id] = s*n_seq_tokens + i; + } + } + } + + for (int i = 0; i < n_tokens; ++i) { + if (last_row[i] >= 0) { + data[i] = last_row[i]; + } + } + } + + if (kv_self.recurrent) { + const int64_t n_kv = kv_self.n; + + if (lctx.inp_s_mask) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer)); + float * data = (float *) lctx.inp_s_mask->data; + + // clear unused states + for (int i = 0; i < n_kv; ++i) { + const uint32_t cell_id = i + kv_self.head; + llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id]; + + data[i] = (float) (kv_cell.src >= 0); + + // only clear once + if (kv_cell.src < 0) { + kv_cell.src = cell_id; + } + } + } + + if (lctx.inp_s_copy) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer)); + int32_t * data = (int32_t *) lctx.inp_s_copy->data; + + // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n + for (uint32_t i = 0; i < n_kv; ++i) { + const uint32_t cell_id = i + kv_self.head; + llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id]; + + // prevent out-of-bound sources + if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self.size) { + kv_cell.src = cell_id; + } + + data[i] = kv_cell.src; + + // ensure copy only happens once + if (kv_cell.src != (int32_t) cell_id) { + kv_cell.src = cell_id; + } + } + } + } + + if (lctx.inp_pos_bucket) { + const int64_t n_tokens = ubatch.n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_pos_bucket->buffer)); + GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing + + int32_t * data = (int32_t *) lctx.inp_pos_bucket->data; + + if (!lctx.is_encoding) { + const int64_t n_kv = kv_self.n; + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_kv; ++i) { + data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(lctx.kv_self.cells[i].pos, ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding); + } + } + } + } else { + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_tokens; ++i) { + data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch.pos[i], ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding); + } + } + } + } + } + + if (!lctx.is_encoding && lctx.inp_embd_enc) { + assert(lctx.inp_embd_enc->type == GGML_TYPE_F32); + assert((size_t) ggml_nelements(lctx.inp_embd_enc) == lctx.embd_enc.size()); + + ggml_backend_tensor_set(lctx.inp_embd_enc, lctx.embd_enc.data(), 0, ggml_nbytes(lctx.inp_embd_enc)); + } + + if (!lctx.is_encoding && lctx.inp_KQ_mask_cross) { + const int64_t n_output_enc = lctx.embd_enc.size() / hparams.n_embd; + const int64_t n_tokens = ubatch.n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_cross->buffer)); + GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing + + float * data = (float *) lctx.inp_KQ_mask_cross->data; + + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_output_enc; ++i) { + float f = -INFINITY; + for (int s = 0; s < ubatch.n_seq_id[j]; ++s) { + const llama_seq_id seq_id = 
ubatch.seq_id[j][s]; + if (lctx.seq_ids_enc[i].find(seq_id) != lctx.seq_ids_enc[i].end()) { + f = 0.0f; + } + } + data[h*(n_output_enc*n_tokens) + j*n_output_enc + i] = f; + } + } + + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_output_enc; ++j) { + data[h*(n_output_enc*n_tokens) + i*n_output_enc + j] = -INFINITY; + } + } + } + } +} + +// llama output + +size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs) { + const auto & cparams = lctx.cparams; + const auto & hparams = lctx.model.hparams; + + const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max); + + const auto n_batch = cparams.n_batch; + const auto n_vocab = hparams.n_vocab; + const auto n_embd = hparams.n_embd; + + // TODO: use a per-batch flag for logits presence instead + const bool has_logits = !cparams.embeddings; + const bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); + + const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0; + const size_t embd_size = has_embd ? n_embd*n_outputs_max : 0; + + if (lctx.output_ids.empty()) { + // init, never resized afterwards + lctx.output_ids.resize(n_batch); + } + + const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output.get()) : 0; + const size_t new_size = (logits_size + embd_size) * sizeof(float); + + // alloc only when more than the current capacity is required + // TODO: also consider shrinking the buffer + if (!lctx.buf_output || prev_size < new_size) { + if (lctx.buf_output) { +#ifndef NDEBUG + // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark) + LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); +#endif + lctx.buf_output = nullptr; + lctx.logits = nullptr; + lctx.embd = nullptr; + } + + auto * buft = ggml_backend_cpu_buffer_type(); + // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory + auto * output_dev = lctx.model.dev_output.dev; + auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr; + if (output_dev_host_buft) { + buft = output_dev_host_buft; + } + lctx.buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size)); + if (lctx.buf_output == nullptr) { + LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0)); + return 0; + } + } + + float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output.get()); + + lctx.logits = has_logits ? output_base : nullptr; + lctx.embd = has_embd ? output_base + logits_size : nullptr; + + lctx.output_size = n_outputs_max; + lctx.logits_size = logits_size; + lctx.embd_size = embd_size; + + // set all ids as invalid (negative) + std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1); + + ggml_backend_buffer_clear(lctx.buf_output.get(), 0); + + lctx.n_outputs = 0; + + return n_outputs_max; +} + +void llama_output_reorder(struct llama_context & ctx) { + std::vector & out_ids = ctx.sbatch.out_ids; + if (!out_ids.empty()) { + const uint32_t n_vocab = ctx.model.hparams.n_vocab; + const uint32_t n_embd = ctx.model.hparams.n_embd; + + const int32_t n_outputs = ctx.n_outputs; + GGML_ASSERT((size_t) n_outputs == out_ids.size()); + + // TODO: is there something more efficient which also minimizes swaps? 
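A minimal standalone sketch (not part of the patch) of the approach taken below: selection-sort the out_ids permutation and mirror every swap onto the rows of a parallel buffer, so at most n-1 row swaps are performed. The row width and values are hypothetical.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
    std::vector<int>   out_ids = { 2, 0, 1 };                      // batch index of each output row
    std::vector<float> logits  = { 2.f, 2.f, 0.f, 0.f, 1.f, 1.f }; // 3 rows x n_vocab = 2

    const int n       = (int) out_ids.size();
    const int n_vocab = 2;

    for (int i = 0; i < n - 1; ++i) {
        int j_min = i;
        for (int j = i + 1; j < n; ++j) {
            if (out_ids[j] < out_ids[j_min]) {
                j_min = j;
            }
        }
        if (j_min == i) {
            continue;
        }
        std::swap(out_ids[i], out_ids[j_min]);
        for (int k = 0; k < n_vocab; ++k) {                        // swap the matching logits rows
            std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]);
        }
    }

    for (int i = 0; i < n; ++i) {
        printf("row %d -> batch index %d, logits[0] = %.0f\n", i, out_ids[i], logits[i*n_vocab]);
    }
    // after the reorder the rows follow batch order: 0, 1, 2
}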
+ // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) + for (int32_t i = 0; i < n_outputs - 1; ++i) { + int32_t j_min = i; + for (int32_t j = i + 1; j < n_outputs; ++j) { + if (out_ids[j] < out_ids[j_min]) { + j_min = j; + } + } + if (j_min == i) { continue; } + std::swap(out_ids[i], out_ids[j_min]); + if (ctx.logits_size > 0) { + for (uint32_t k = 0; k < n_vocab; k++) { + std::swap(ctx.logits[i*n_vocab + k], ctx.logits[j_min*n_vocab + k]); + } + } + if (ctx.embd_size > 0) { + for (uint32_t k = 0; k < n_embd; k++) { + std::swap(ctx.embd[i*n_embd + k], ctx.embd[j_min*n_embd + k]); + } + } + } + std::fill(ctx.output_ids.begin(), ctx.output_ids.end(), -1); + for (int32_t i = 0; i < n_outputs; ++i) { + ctx.output_ids[out_ids[i]] = i; + } + out_ids.clear(); + } +} + +// +// interface implementation +// + +void llama_free(struct llama_context * ctx) { + delete ctx; +} + +uint32_t llama_n_ctx(const struct llama_context * ctx) { + return ctx->cparams.n_ctx; +} + +uint32_t llama_n_batch(const struct llama_context * ctx) { + return ctx->cparams.n_batch; +} + +uint32_t llama_n_ubatch(const struct llama_context * ctx) { + return ctx->cparams.n_ubatch; +} + +uint32_t llama_n_seq_max(const struct llama_context * ctx) { + return ctx->kv_self.size; +} + +const struct llama_model * llama_get_model(const struct llama_context * ctx) { + return &ctx->model; +} + +enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) { + return ctx->cparams.pooling_type; +} + +void llama_attach_threadpool( + struct llama_context * ctx, + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch) { + ctx->threadpool = threadpool; + ctx->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool; +} + +void llama_detach_threadpool(struct llama_context * ctx) { + ctx->threadpool = nullptr; + ctx->threadpool_batch = nullptr; +} + +void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) { + ctx->cparams.n_threads = n_threads; + ctx->cparams.n_threads_batch = n_threads_batch; +} + +int32_t llama_n_threads(struct llama_context * ctx) { + return ctx->cparams.n_threads; +} + +int32_t llama_n_threads_batch(struct llama_context * ctx) { + return ctx->cparams.n_threads_batch; +} + +void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { + ctx->abort_callback = abort_callback; + ctx->abort_callback_data = abort_callback_data; + + for (auto & backend : ctx->backends) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get())); + auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"); + if (set_abort_callback_fn) { + set_abort_callback_fn(backend.get(), ctx->abort_callback, ctx->abort_callback_data); + } + } +} + +void llama_set_embeddings(struct llama_context * ctx, bool embeddings) { + ctx->cparams.embeddings = embeddings; +} + +void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) { + ctx->cparams.causal_attn = causal_attn; +} + +void llama_synchronize(struct llama_context * ctx) { + ggml_backend_sched_synchronize(ctx->sched.get()); + + // FIXME: if multiple single tokens are evaluated without a synchronization, + // the stats will be added to the prompt evaluation stats + // this should only happen when using batch size 1 to evaluate a batch + + // add the evaluation to the stats + if (ctx->n_queued_tokens == 1) { + if 
(!ctx->cparams.no_perf) { + ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us; + } + ctx->n_eval++; + } else if (ctx->n_queued_tokens > 1) { + if (!ctx->cparams.no_perf) { + ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us; + } + ctx->n_p_eval += ctx->n_queued_tokens; + } + + // get a more accurate load time, upon first eval + if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) { + ctx->t_load_us = ggml_time_us() - ctx->t_start_us; + ctx->has_evaluated_once = true; + } + + ctx->n_queued_tokens = 0; + ctx->t_compute_start_us = 0; +} + +float * llama_get_logits(struct llama_context * ctx) { + llama_synchronize(ctx); + + // reorder logits for backward compatibility + // TODO: maybe deprecate this + llama_output_reorder(*ctx); + + return ctx->logits; +} + +float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) { + int32_t j = -1; + + llama_synchronize(ctx); + + try { + if (ctx->logits == nullptr) { + throw std::runtime_error("no logits"); + } + + if (i < 0) { + j = ctx->n_outputs + i; + if (j < 0) { + throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); + } + } else if ((size_t) i >= ctx->output_ids.size()) { + throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); + } else { + j = ctx->output_ids[i]; + } + + if (j < 0) { + throw std::runtime_error(format("batch.logits[%d] != true", i)); + } + if (j >= ctx->n_outputs) { + // This should not happen + throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); + } + + return ctx->logits + j*ctx->model.hparams.n_vocab; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what()); +#ifndef NDEBUG + GGML_ABORT("fatal error"); +#else + return nullptr; +#endif + } +} + +float * llama_get_embeddings(struct llama_context * ctx) { + llama_synchronize(ctx); + + // reorder embeddings for backward compatibility + // TODO: maybe deprecate this + llama_output_reorder(*ctx); + + return ctx->embd; +} + +float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) { + int32_t j = -1; + + llama_synchronize(ctx); + + try { + if (ctx->embd == nullptr) { + throw std::runtime_error("no embeddings"); + } + + if (i < 0) { + j = ctx->n_outputs + i; + if (j < 0) { + throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); + } + } else if ((size_t) i >= ctx->output_ids.size()) { + throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); + } else { + j = ctx->output_ids[i]; + } + + if (j < 0) { + throw std::runtime_error(format("batch.logits[%d] != true", i)); + } + if (j >= ctx->n_outputs) { + // This should not happen + throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); + } + + return ctx->embd + j*ctx->model.hparams.n_embd; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what()); +#ifndef NDEBUG + GGML_ABORT("fatal error"); +#else + return nullptr; +#endif + } +} + +float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) { + llama_synchronize(ctx); + + auto it = ctx->embd_seq.find(seq_id); + if (it == ctx->embd_seq.end()) { + return nullptr; + } + + return it->second.data(); +} + +// llama state API + +// deprecated +size_t llama_get_state_size(struct llama_context * ctx) { + return llama_state_get_size(ctx); +} + +// deprecated +size_t 
llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { + return llama_state_get_data(ctx, dst, -1); +} + +// deprecated +size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) { + return llama_state_set_data(ctx, src, -1); +} + +// deprecated +bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); +} + +// deprecated +bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + return llama_state_save_file(ctx, path_session, tokens, n_token_count); +} + +// TODO: replace all non-fatal assertions with returned errors or exceptions +struct llama_data_write { + virtual void write(const void * src, size_t size) = 0; + virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0; + virtual size_t get_size_written() = 0; + virtual ~llama_data_write() = default; + + void write_string(const std::string & str) { + uint32_t str_size = str.size(); + + write(&str_size, sizeof(str_size)); + write(str.data(), str_size); + } + + void write_model_info(const struct llama_context * ctx) { + const std::string arch_str = llm_arch_name(ctx->model.arch); + write_string(arch_str); + // TODO: add more model-specific info which should prevent loading the session file if not identical + } + + //void write_rng(const std::mt19937 & rng) { + // std::ostringstream rng_ss; + // rng_ss << rng; + + // const std::string & rng_str = rng_ss.str(); + + // write_string(rng_str); + //} + + void write_output_ids(struct llama_context * ctx) { + llama_output_reorder(*ctx); + + const uint32_t n_outputs = ctx->n_outputs; + + std::vector output_pos; + + const size_t n_batch = ctx->cparams.n_batch; + const auto & output_ids = ctx->output_ids; + + GGML_ASSERT(n_outputs <= ctx->output_size); + + output_pos.resize(n_outputs); + + // build a more compact representation of the output ids + for (size_t i = 0; i < n_batch; ++i) { + // map an output id to a position in the batch + int32_t pos = output_ids[i]; + if (pos >= 0) { + GGML_ASSERT((uint32_t) pos < n_outputs); + output_pos[pos] = i; + } + } + + write(&n_outputs, sizeof(n_outputs)); + + if (n_outputs) { + write(output_pos.data(), n_outputs * sizeof(int32_t)); + } + } + + void write_logits(const struct llama_context * ctx) { + const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab); + + write(&logits_size, sizeof(logits_size)); + + if (logits_size) { + write(ctx->logits, logits_size * sizeof(float)); + } + } + + void write_embeddings(const struct llama_context * ctx) { + const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd); + + write(&embeddings_size, sizeof(embeddings_size)); + + if (embeddings_size) { + write(ctx->embd, embeddings_size * sizeof(float)); + } + } + + void write_kv_cache_meta(const llama_kv_cache & kv_self, const std::vector> & cell_ranges, llama_seq_id seq_id = -1) { + for (const auto & range : cell_ranges) { + for (uint32_t i = range.first; i < range.second; ++i) { + const auto & cell = kv_self.cells[i]; + const llama_pos pos = cell.pos; + const uint32_t n_seq_id = seq_id == -1 ? 
cell.seq_id.size() : 0; + + write(&pos, sizeof(pos)); + write(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id) { + for (auto seq_id : cell.seq_id) { + write(&seq_id, sizeof(seq_id)); + } + } + } + } + } + + void write_kv_cache_data(const struct llama_context * ctx, const std::vector> & cell_ranges) { + const struct llama_kv_cache & kv_self = ctx->kv_self; + const struct llama_hparams & hparams = ctx->model.hparams; + + const uint32_t v_trans = kv_self.v_trans ? 1 : 0; + const uint32_t n_layer = hparams.n_layer; + + write(&v_trans, sizeof(v_trans)); + write(&n_layer, sizeof(n_layer)); + + std::vector tmp_buf; + + // Iterate and write all the keys first, each row is a cell + // Get whole range at a time + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + + // Write key type + const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type; + write(&k_type_i, sizeof(k_type_i)); + + // Write row size of key + const uint64_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); + write(&k_size_row, sizeof(k_size_row)); + + // Read each range of cells of k_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * k_size_row; + write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size); + } + } + + if (!kv_self.v_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Write value type + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + write(&v_type_i, sizeof(v_type_i)); + + // Write row size of value + const uint64_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); + write(&v_size_row, sizeof(v_size_row)); + + // Read each range of cells of v_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * v_size_row; + write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size); + } + } + } else { + // When v is transposed, we also need the element size and get the element ranges from each row + const uint32_t kv_size = kv_self.size; + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Write value type + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + write(&v_type_i, sizeof(v_type_i)); + + // Write element size + const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); + write(&v_size_el, sizeof(v_size_el)); + + // Write GQA embedding size + write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); + + // For each row, we get the element values of each cell + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + // Read each range of cells of v_size_el length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t src_offset = (range.first + j * kv_size) * v_size_el; + const size_t buf_size = range_size * v_size_el; + write_tensor_data(kv_self.v_l[il], src_offset, buf_size); + } + } + } + } + } + + void write_kv_cache(const struct llama_context * ctx, llama_seq_id seq_id = -1) { + const struct llama_kv_cache & kv_self = ctx->kv_self; + std::vector> cell_ranges; // ranges, from inclusive, to exclusive + uint32_t cell_count = 0; + + // Count the number of cells with the specified 
seq_id + // Find all the ranges of cells with this seq id (or all, when -1) + uint32_t cell_range_begin = kv_self.size; + for (uint32_t i = 0; i < kv_self.size; ++i) { + const auto & cell = kv_self.cells[i]; + if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { + ++cell_count; + if (cell_range_begin == kv_self.size) { + cell_range_begin = i; + } + } else { + if (cell_range_begin != kv_self.size) { + cell_ranges.emplace_back(cell_range_begin, i); + cell_range_begin = kv_self.size; + } + } + } + if (cell_range_begin != kv_self.size) { + cell_ranges.emplace_back(cell_range_begin, kv_self.size); + } + + // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count + uint32_t cell_count_check = 0; + for (const auto & range : cell_ranges) { + cell_count_check += range.second - range.first; + } + GGML_ASSERT(cell_count == cell_count_check); + + write(&cell_count, sizeof(cell_count)); + + write_kv_cache_meta(kv_self, cell_ranges, seq_id); + write_kv_cache_data(ctx, cell_ranges); + } +}; + +struct llama_data_read { + virtual const uint8_t * read(size_t size) = 0; + virtual void read_to(void * dst, size_t size) = 0; + virtual size_t get_size_read() = 0; + virtual ~llama_data_read() = default; + + void read_string(std::string & str) { + uint32_t str_size; + read_to(&str_size, sizeof(str_size)); + + str.assign((const char *) read(str_size), str_size); + } + + // validate model information + void read_model_info(const struct llama_context * ctx) { + const std::string cur_arch_str = llm_arch_name(ctx->model.arch); + + std::string arch_str; + read_string(arch_str); + if (cur_arch_str != arch_str) { + throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str())); + } + // TODO: add more info which needs to be identical but which is not verified otherwise + } + + //void read_rng(std::mt19937 & rng) { + // std::string rng_str; + // read_string(rng_str); + + // std::istringstream rng_ss(rng_str); + // rng_ss >> rng; + + // if (rng_ss.fail()) { + // throw std::runtime_error("failed to load RNG state"); + // } + //} + + void read_output_ids(struct llama_context * ctx) { + std::vector output_pos; + + uint32_t n_outputs; + read_to(&n_outputs, sizeof(n_outputs)); + + if (n_outputs > llama_output_reserve(*ctx, n_outputs)) { + throw std::runtime_error("could not reserve outputs"); + } + + if (n_outputs) { + output_pos.resize(n_outputs); + read_to(output_pos.data(), n_outputs * sizeof(int32_t)); + + for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) { + int32_t id = output_pos[i]; + if ((uint32_t) id >= ctx->cparams.n_batch) { + throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, ctx->cparams.n_batch)); + } + ctx->output_ids[id] = i; + } + + ctx->n_outputs = n_outputs; + } + } + + void read_logits(struct llama_context * ctx) { + uint64_t logits_size; + read_to(&logits_size, sizeof(logits_size)); + + if (ctx->logits_size < logits_size) { + throw std::runtime_error("logits buffer too small"); + } + + if (logits_size) { + read_to(ctx->logits, logits_size * sizeof(float)); + } + } + + void read_embeddings(struct llama_context * ctx) { + uint64_t embeddings_size; + read_to(&embeddings_size, sizeof(embeddings_size)); + + if (ctx->embd_size < embeddings_size) { + throw std::runtime_error("embeddings buffer too small"); + } + + if (embeddings_size) { + read_to(ctx->embd, embeddings_size * sizeof(float)); + } + } + + bool read_kv_cache_meta(struct llama_context * ctx, uint32_t cell_count, 
llama_seq_id dest_seq_id = -1) { + struct llama_kv_cache & kv_self = ctx->kv_self; + + if (dest_seq_id != -1) { + // single sequence + + llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + + llama_ubatch batch = ctx->sbatch.reserve_ubatch(cell_count, /* has_embd */ false); + batch.n_tokens = cell_count; + batch.n_seq_tokens = cell_count; + batch.n_seqs = 1; + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_pos pos; + uint32_t n_seq_id; + + read_to(&pos, sizeof(pos)); + read_to(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id != 0) { + LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); + return false; + } + + batch.pos[i] = pos; + } + batch.n_seq_id[0] = 1; + batch.seq_id[0] = &dest_seq_id; + if (!llama_kv_cache_find_slot(kv_self, batch)) { + LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); + return false; + } + + // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values) + // Assume that this is one contiguous block of cells + GGML_ASSERT(kv_self.head + cell_count <= kv_self.size); + GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]); + GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]); + GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id)); + GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id)); + } else { + // whole KV cache restore + + if (cell_count > kv_self.size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); + return false; + } + + llama_kv_cache_clear(kv_self); + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_kv_cell & cell = kv_self.cells[i]; + + llama_pos pos; + uint32_t n_seq_id; + + read_to(&pos, sizeof(pos)); + read_to(&n_seq_id, sizeof(n_seq_id)); + + cell.pos = pos; + + for (uint32_t j = 0; j < n_seq_id; ++j) { + llama_seq_id seq_id; + read_to(&seq_id, sizeof(seq_id)); + + if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { + LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); + return false; + } + + cell.seq_id.insert(seq_id); + + if (kv_self.recurrent) { + int32_t & tail = kv_self.cells[seq_id].tail; + if (tail != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); + return false; + } + tail = i; + } + } + } + + kv_self.head = 0; + kv_self.used = cell_count; + } + + if (kv_self.recurrent) { + for (uint32_t i = 0; i < cell_count; ++i) { + uint32_t cell_id = kv_self.head + i; + // make sure the recurrent states will keep their restored state + kv_self.cells[cell_id].src = cell_id; + } + } + + return true; + } + + bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) { + const struct llama_hparams & hparams = ctx->model.hparams; + struct llama_kv_cache & kv_self = ctx->kv_self; + uint32_t v_trans; + uint32_t n_layer; + read_to(&v_trans, sizeof(v_trans)); + read_to(&n_layer, sizeof(n_layer)); + + if (n_layer != hparams.n_layer) { + LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); + return false; + } + if (cell_count > kv_self.size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, kv_self.size); + return false; + } + if (kv_self.v_trans != (bool) v_trans) { + LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); + return false; + } + + // For each layer, read the 
keys for each cell, one row is one cell, read as one contiguous block + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + + // Read type of key + int32_t k_type_i_ref; + read_to(&k_type_i_ref, sizeof(k_type_i_ref)); + const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type; + if (k_type_i != k_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); + return false; + } + + // Read row size of key + uint64_t k_size_row_ref; + read_to(&k_size_row_ref, sizeof(k_size_row_ref)); + const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); + if (k_size_row != k_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the keys for the whole cell range + ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row); + } + } + + if (!kv_self.v_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Read type of value + int32_t v_type_i_ref; + read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read row size of value + uint64_t v_size_row_ref; + read_to(&v_size_row_ref, sizeof(v_size_row_ref)); + const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); + if (v_size_row != v_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the values for the whole cell range + ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row); + } + } + } else { + // For each layer, read the values for each cell (transposed) + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Read type of value + int32_t v_type_i_ref; + read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read element size of value + uint32_t v_size_el_ref; + read_to(&v_size_el_ref, sizeof(v_size_el_ref)); + const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); + if (v_size_el != v_size_el_ref) { + LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); + return false; + } + + // Read GQA embedding size + uint32_t n_embd_v_gqa_ref; + read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); + if (n_embd_v_gqa != n_embd_v_gqa_ref) { + LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); + return false; + } + + if (cell_count) { + // For each row in the transposed matrix, read the values for the whole cell range + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + const size_t dst_offset = (kv_self.head + j * 
kv_self.size) * v_size_el; + ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); + } + } + } + } + return true; + } + + void read_kv_cache(struct llama_context * ctx, llama_seq_id seq_id = -1) { + uint32_t cell_count; + read_to(&cell_count, sizeof(cell_count)); + + bool res = read_kv_cache_meta(ctx, cell_count, seq_id) && read_kv_cache_data(ctx, cell_count); + + if (!res) { + if (seq_id == -1) { + llama_kv_cache_clear(ctx); + } else { + llama_kv_cache_seq_rm(ctx, seq_id, -1, -1); + } + throw std::runtime_error("failed to restore kv cache"); + } + } +}; + +struct llama_data_write_dummy : llama_data_write { + size_t size_written = 0; + + llama_data_write_dummy() {} + + void write(const void * /* src */, size_t size) override { + size_written += size; + } + + void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override { + size_written += size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_write_buffer : llama_data_write { + uint8_t * ptr; + size_t buf_size = 0; + size_t size_written = 0; + + llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + + void write(const void * src, size_t size) override { + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + memcpy(ptr, src, size); + ptr += size; + size_written += size; + buf_size -= size; + } + + void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override { + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + ggml_backend_tensor_get(tensor, ptr, offset, size); + ptr += size; + size_written += size; + buf_size -= size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_read_buffer : llama_data_read { + const uint8_t * ptr; + size_t buf_size = 0; + size_t size_read = 0; + + llama_data_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + + const uint8_t * read(size_t size) override { + const uint8_t * base_ptr = ptr; + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + ptr += size; + size_read += size; + buf_size -= size; + return base_ptr; + } + + void read_to(void * dst, size_t size) override { + memcpy(dst, read(size), size); + } + + size_t get_size_read() override { + return size_read; + } +}; + +struct llama_data_write_file : llama_data_write { + llama_file * file; + size_t size_written = 0; + std::vector temp_buffer; + + llama_data_write_file(llama_file * f) : file(f) {} + + void write(const void * src, size_t size) override { + file->write_raw(src, size); + size_written += size; + } + + void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override { + temp_buffer.resize(size); + ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size); + write(temp_buffer.data(), temp_buffer.size()); + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_read_file : llama_data_read { + llama_file * file; + size_t size_read = 0; + std::vector temp_buffer; + + llama_data_read_file(llama_file * f) : file(f) {} + + void read_to(void * dst, size_t size) override { + file->read_raw(dst, size); + size_read += size; + } + + const uint8_t * read(size_t size) override { + temp_buffer.resize(size); + read_to(temp_buffer.data(), size); + return temp_buffer.data(); + } + + size_t 
get_size_read() override { + return size_read; + } +}; + +/** copy state data into either a buffer or file depending on the passed in context + * + * file context: + * llama_file file("/path", "wb"); + * llama_data_write_file data_ctx(&file); + * llama_state_get_data_internal(ctx, data_ctx); + * + * buffer context: + * std::vector buf(max_size, 0); + * llama_data_write_buffer data_ctx(buf.data(), max_size); + * llama_state_get_data_internal(ctx, data_ctx); + * +*/ +static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx) { + llama_synchronize(ctx); + + data_ctx.write_model_info(ctx); + + // copy outputs + data_ctx.write_output_ids(ctx); + data_ctx.write_logits(ctx); + data_ctx.write_embeddings(ctx); + + data_ctx.write_kv_cache(ctx); + + return data_ctx.get_size_written(); +} + +size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) { + llama_data_write_buffer data_ctx(dst, size); + try { + return llama_state_get_data_internal(ctx, data_ctx); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); + return 0; + } +} + +// Returns the *actual* size of the state. +// Intended to be used when saving to state to a buffer. +size_t llama_state_get_size(struct llama_context * ctx) { + llama_data_write_dummy data_ctx; + try { + return llama_state_get_data_internal(ctx, data_ctx); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); + return 0; + } +} + +static size_t llama_state_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx) { + llama_synchronize(ctx); + + data_ctx.read_model_info(ctx); + + // set outputs + data_ctx.read_output_ids(ctx); + data_ctx.read_logits(ctx); + data_ctx.read_embeddings(ctx); + + data_ctx.read_kv_cache(ctx); + + return data_ctx.get_size_read(); +} + +// Sets the state reading from the specified source address +size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src, size_t size) { + llama_data_read_buffer data_ctx(src, size); + try { + return llama_state_set_data_internal(ctx, data_ctx); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); + return 0; + } +} + +static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + llama_file file(path_session, "rb"); + + // sanity checks + { + const uint32_t magic = file.read_u32(); + const uint32_t version = file.read_u32(); + + if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) { + LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version); + return false; + } + } + + // load the prompt + { + const uint32_t n_token_count = file.read_u32(); + + if (n_token_count > n_token_capacity) { + LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); + return false; + } + + file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); + *n_token_count_out = n_token_count; + } + + // restore the context state + { + const size_t n_state_size_cur = file.size() - file.tell(); + + llama_data_read_file data_ctx(&file); + const size_t n_read = llama_state_set_data_internal(ctx, data_ctx); + + if (n_read != n_state_size_cur) { + LLAMA_LOG_ERROR("%s: did not read all of the session file data! 
size %zu, got %zu\n", __func__, n_state_size_cur, n_read); + return false; + } + } + return true; +} + +bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + try { + return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what()); + return false; + } +} + +static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + llama_file file(path_session, "wb"); + + file.write_u32(LLAMA_SESSION_MAGIC); + file.write_u32(LLAMA_SESSION_VERSION); + + // save the prompt + file.write_u32((uint32_t) n_token_count); + file.write_raw(tokens, sizeof(llama_token) * n_token_count); + + // save the context state using stream saving + llama_data_write_file data_ctx(&file); + llama_state_get_data_internal(ctx, data_ctx); + + return true; +} + +bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + try { + return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what()); + return false; + } +} + +static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx, llama_seq_id seq_id) { + llama_synchronize(ctx); + + data_ctx.write_kv_cache(ctx, seq_id); + + return data_ctx.get_size_written(); +} + +size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) { + llama_data_write_dummy data_ctx; + return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); +} + +size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) { + llama_data_write_buffer data_ctx(dst, size); + try { + return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving sequence state: %s\n", __func__, err.what()); + return 0; + } +} + +static size_t llama_state_seq_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx, llama_seq_id dest_seq_id) { + llama_synchronize(ctx); + + data_ctx.read_kv_cache(ctx, dest_seq_id); + + return data_ctx.get_size_read(); +} + +size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id dest_seq_id) { + llama_data_read_buffer data_ctx(src, size); + try { + return llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading sequence state: %s\n", __func__, err.what()); + return 0; + } +} + +static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { + llama_file file(filepath, "wb"); + + file.write_u32(LLAMA_STATE_SEQ_MAGIC); + file.write_u32(LLAMA_STATE_SEQ_VERSION); + + // save the prompt + file.write_u32((uint32_t) n_token_count); + file.write_raw(tokens, sizeof(llama_token) * n_token_count); + + // save the context state using stream saving + llama_data_write_file data_ctx(&file); + llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); + + const size_t res = file.tell(); + GGML_ASSERT(res == 
sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written()); + return res; +} + +static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + llama_file file(filepath, "rb"); + + // version checks + { + const uint32_t magic = file.read_u32(); + const uint32_t version = file.read_u32(); + + if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) { + LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version); + return 0; + } + } + + // load the prompt + { + const uint32_t n_token_count = file.read_u32(); + + if (n_token_count > n_token_capacity) { + LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); + return 0; + } + + file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); + *n_token_count_out = n_token_count; + } + + // restore the context state + { + const size_t state_size = file.size() - file.tell(); + llama_data_read_file data_ctx(&file); + const size_t nread = llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id); + if (!nread) { + LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__); + return 0; + } + GGML_ASSERT(nread <= state_size); + GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell()); + } + + return file.tell(); +} + +size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { + try { + return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what()); + return 0; + } +} + +size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + try { + return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what()); + return 0; + } +} + +const std::vector> & llama_internal_get_tensor_map( + struct llama_context * ctx +) { + return ctx->model.tensors_by_name; +} diff --git a/src/llama-context.h b/src/llama-context.h new file mode 100644 index 000000000..0d163c470 --- /dev/null +++ b/src/llama-context.h @@ -0,0 +1,128 @@ +#pragma once + +#include "llama.h" +#include "llama-batch.h" +#include "llama-cparams.h" +#include "llama-model.h" +#include "llama-kv-cache.h" +#include "llama-adapter.h" + +#include "ggml-cpp.h" + +#include +#include +#include +#include + +struct llama_context { + llama_context(const llama_model & model) + : model(model) + , t_start_us(model.t_start_us) + , t_load_us(model.t_load_us) {} + + const struct llama_model & model; + + struct llama_cparams cparams; + struct llama_sbatch sbatch; // TODO: revisit if needed + struct llama_kv_cache kv_self; + struct llama_control_vector cvec; + + std::unordered_map lora_adapters; + + std::vector backends; + std::vector> set_n_threads_fns; + + ggml_backend_t backend_cpu = nullptr; + + ggml_threadpool_t threadpool = nullptr; + ggml_threadpool_t threadpool_batch = nullptr; + + bool 
has_evaluated_once = false; + + mutable int64_t t_start_us; + mutable int64_t t_load_us; + mutable int64_t t_p_eval_us = 0; + mutable int64_t t_eval_us = 0; + + mutable int64_t t_compute_start_us = 0; + mutable int64_t n_queued_tokens = 0; + + mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) + mutable int32_t n_eval = 0; // number of eval calls + + // host buffer for the model output (logits and embeddings) + ggml_backend_buffer_ptr buf_output; + + // decode output (2-dimensional array: [n_outputs][n_vocab]) + size_t logits_size = 0; // capacity (of floats) for logits + float * logits = nullptr; + + std::vector output_ids; // map batch token positions to ids of the logits and embd buffers + size_t output_size = 0; // capacity (of tokens positions) for the output buffers + int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch + + bool logits_all = false; + + // embeddings output (2-dimensional array: [n_outputs][n_embd]) + // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE + size_t embd_size = 0; // capacity (of floats) for embeddings + float * embd = nullptr; + + // sequence embeddings output (map of [n_embd] vectors) + // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE + std::map> embd_seq; + + // whether we are computing encoder output or decoder output + bool is_encoding = false; + + // TODO: find a better way to accommodate mutli-dimension position encoding methods + // number of position id each token get, 1 for each token in most cases. + // when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate. + int n_pos_per_token = 1; + + // output of the encoder part of the encoder-decoder models + std::vector embd_enc; + std::vector> seq_ids_enc; + + // memory buffers used to evaluate the model + std::vector buf_compute_meta; + ggml_backend_sched_ptr sched; + + ggml_abort_callback abort_callback = nullptr; + void * abort_callback_data = nullptr; + + // input tensors + struct ggml_tensor * inp_tokens; // I32 [n_batch] + struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch] + struct ggml_tensor * inp_pos; // I32 [n_batch] + struct ggml_tensor * inp_out_ids; // I32 [n_outputs] + struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch] + struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch] + struct ggml_tensor * inp_K_shift; // I32 [kv_size] + struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch] + struct ggml_tensor * inp_cls; // I32 [n_batch] + struct ggml_tensor * inp_s_copy; // I32 [kv_size] + struct ggml_tensor * inp_s_mask; // F32 [1, n_kv] + struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch] + struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch] + struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc] + struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch] +}; + +// TODO: make these methods of llama_context +void llama_set_k_shift(struct llama_context & lctx); + +void llama_set_s_copy(struct llama_context & lctx); + +void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch); + +// Make sure enough space is available for outputs. +// Returns max number of outputs for which space was reserved. 
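// [illustrative sketch, not part of this patch] output_ids above maps a batch
// position to a row index into the logits/embd buffers, and n_outputs says how
// many rows are valid. The public llama_get_logits_ith() does essentially the
// lookup below with extra validation; the helper name and the explicit n_vocab
// parameter are assumptions made only for this example.
static const float * example_logits_row(const struct llama_context & ctx, int32_t i_batch, int32_t n_vocab) {
    // bounds checking of i_batch against the batch size is omitted here
    const int32_t row = ctx.output_ids[i_batch]; // negative when no logits were kept for this token
    if (row < 0 || row >= ctx.n_outputs) {
        return nullptr;
    }
    return ctx.logits + (size_t) row * n_vocab;  // logits is laid out as [n_outputs][n_vocab]
}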
+size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs); + +// make the outputs have the same order they had in the user-provided batch +void llama_output_reorder(struct llama_context & ctx); + +// For internal test use +// TODO: remove +const std::vector> & llama_internal_get_tensor_map(struct llama_context * ctx); diff --git a/src/llama-cparams.cpp b/src/llama-cparams.cpp new file mode 100644 index 000000000..28369be36 --- /dev/null +++ b/src/llama-cparams.cpp @@ -0,0 +1 @@ +#include "llama-cparams.h" diff --git a/src/llama-cparams.h b/src/llama-cparams.h new file mode 100644 index 000000000..252012f3d --- /dev/null +++ b/src/llama-cparams.h @@ -0,0 +1,37 @@ +#pragma once + +#include "llama.h" + +#include + +struct llama_cparams { + uint32_t n_ctx; // context size used during inference + uint32_t n_batch; + uint32_t n_ubatch; + uint32_t n_seq_max; + int n_threads; // number of threads to use for generation + int n_threads_batch; // number of threads to use for batch processing + + float rope_freq_base; + float rope_freq_scale; + + uint32_t n_ctx_orig_yarn; + // These hyperparameters are not exposed in GGUF, because all + // existing YaRN models use the same values for them. + float yarn_ext_factor; + float yarn_attn_factor; + float yarn_beta_fast; + float yarn_beta_slow; + float defrag_thold; + + bool embeddings; + bool causal_attn; + bool offload_kqv; + bool flash_attn; + bool no_perf; + + enum llama_pooling_type pooling_type; + + ggml_backend_sched_eval_callback cb_eval; + void * cb_eval_user_data; +}; diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index 76d0cb3a2..186dc9a25 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -1,5 +1,6 @@ #include "llama-grammar.h" +#include "llama-impl.h" #include "llama-vocab.h" #include "llama-sampling.h" diff --git a/src/llama-grammar.h b/src/llama-grammar.h index 13e940fb5..f8b40c651 100644 --- a/src/llama-grammar.h +++ b/src/llama-grammar.h @@ -1,8 +1,10 @@ #pragma once -#include "llama-impl.h" +#include "llama.h" #include +#include +#include struct llama_vocab; diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp new file mode 100644 index 000000000..c40534696 --- /dev/null +++ b/src/llama-hparams.cpp @@ -0,0 +1,71 @@ +#include "llama-hparams.h" + +#include "ggml.h" + +uint32_t llama_hparams::n_head(uint32_t il) const { + if (il < n_layer) { + return n_head_arr[il]; + } + + GGML_ABORT("fatal error"); +} + +uint32_t llama_hparams::n_head_kv(uint32_t il) const { + if (il < n_layer) { + return n_head_kv_arr[il]; + } + + GGML_ABORT("fatal error"); +} + +uint32_t llama_hparams::n_ff(uint32_t il) const { + if (il < n_layer) { + return n_ff_arr[il]; + } + + GGML_ABORT("fatal error"); +} + +uint32_t llama_hparams::n_gqa(uint32_t il) const { + const uint32_t n_head = this->n_head(il); + const uint32_t n_head_kv = this->n_head_kv(il); + + if (n_head_kv == 0) { + return 0; + } + + return n_head/n_head_kv; +} + +uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const { + const uint32_t n_head_kv = this->n_head_kv(il); + + return n_embd_head_k * n_head_kv; +} + +uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { + const uint32_t n_head_kv = this->n_head_kv(il); + + return n_embd_head_v * n_head_kv; +} + +uint32_t llama_hparams::n_embd_k_s() const { + if (wkv_head_size != 0) { + // for RWKV models + return 2 * n_embd; + } + + // TODO: maybe support other convolution strides than 1 + // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed + return 
(ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; +} + +uint32_t llama_hparams::n_embd_v_s() const { + if (wkv_head_size != 0) { + // corresponds to RWKV's wkv_states size + return n_embd * wkv_head_size; + } + + // corresponds to Mamba's ssm_states size + return ssm_d_state * ssm_d_inner; +} diff --git a/src/llama-hparams.h b/src/llama-hparams.h new file mode 100644 index 000000000..3a76b71a4 --- /dev/null +++ b/src/llama-hparams.h @@ -0,0 +1,132 @@ +#pragma once + +#include "llama.h" + +#include + +// bump if necessary +#define LLAMA_MAX_LAYERS 512 +#define LLAMA_MAX_EXPERTS 160 // DeepSeekV2 + +struct llama_hparams_posnet { + uint32_t n_embd; + uint32_t n_layer; +}; + +struct llama_hparams_convnext { + uint32_t n_embd; + uint32_t n_layer; +}; + +struct llama_hparams { + bool vocab_only; + bool rope_finetuned; + bool use_par_res; + bool swin_norm; + + uint32_t n_vocab = 0; + uint32_t n_ctx_train; // context size the model was trained on + uint32_t n_embd; + uint32_t n_embd_features = 0; + uint32_t n_layer; + uint32_t n_rot; + uint32_t n_swa = 0; // sliding window attention (SWA) + uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads + uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head + uint32_t n_expert = 0; + uint32_t n_expert_used = 0; + uint32_t n_vocab_type = 0; // for BERT-style token types + uint32_t n_rel_attn_bkts = 0; + + // for WavTokenizer + struct llama_hparams_posnet posnet; + struct llama_hparams_convnext convnext; + + std::array n_head_arr; + std::array n_head_kv_arr; + std::array n_ff_arr; + + uint32_t n_layer_dense_lead = 0; + uint32_t n_lora_q = 0; + uint32_t n_lora_kv = 0; + uint32_t n_ff_exp = 0; + uint32_t n_ff_shexp = 0; + uint32_t n_expert_shared = 0; + uint32_t n_norm_groups = 0; + + float expert_weights_scale = 0.0; + + float f_norm_eps; + float f_norm_rms_eps; + float f_norm_group_eps; + + float f_attn_logit_softcapping = 50.0f; + float f_final_logit_softcapping = 30.0f; + + // for RWKV + uint32_t rescale_every_n_layers = 0; + uint32_t time_mix_extra_dim = 0; + uint32_t time_decay_extra_dim = 0; + uint32_t wkv_head_size = 0; + + float rope_attn_factor = 1.0f; + float rope_freq_base_train; + float rope_freq_scale_train; + uint32_t n_ctx_orig_yarn; + float rope_yarn_log_mul; + + std::array rope_sections; + + // for State Space Models + uint32_t ssm_d_conv = 0; + uint32_t ssm_d_inner = 0; + uint32_t ssm_d_state = 0; + uint32_t ssm_dt_rank = 0; + + bool ssm_dt_b_c_rms = false; + + float f_clamp_kqv = 0.0f; + float f_max_alibi_bias = 0.0f; + float f_logit_scale = 0.0f; + + // Additional scale factors (Granite/Granite MoE) + float f_residual_scale = 0.0f; + float f_embedding_scale = 0.0f; + float f_attention_scale = 0.0f; + + bool causal_attn = true; + bool use_alibi = false; + bool attn_soft_cap = false; + + // needed by encoder-decoder models (e.g. 
T5, FLAN-T5) + // ref: https://github.com/ggerganov/llama.cpp/pull/8141 + llama_token dec_start_token_id = LLAMA_TOKEN_NULL; + + enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE; + enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE; + enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE; + + uint32_t n_head(uint32_t il = 0) const; + + uint32_t n_head_kv(uint32_t il = 0) const; + + uint32_t n_ff(uint32_t il = 0) const; + + uint32_t n_gqa(uint32_t il = 0) const; + + // dimension of key embeddings across all k-v heads + uint32_t n_embd_k_gqa(uint32_t il = 0) const; + + // dimension of value embeddings across all k-v heads + uint32_t n_embd_v_gqa(uint32_t il = 0) const; + + // dimension of the rolling state embeddings + // corresponds to Mamba's conv_states size or RWKV's token_shift states size + uint32_t n_embd_k_s() const; + + // dimension of the recurrent state embeddings + uint32_t n_embd_v_s() const; +}; + +static_assert(std::is_trivially_copyable::value, "llama_hparams must be trivially copyable"); + diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp new file mode 100644 index 000000000..a05ba4f63 --- /dev/null +++ b/src/llama-impl.cpp @@ -0,0 +1,166 @@ +#include "llama-impl.h" + +#include "llama.h" + +#include +#include +#include +#include +#include +#include + +struct llama_logger_state { + ggml_log_callback log_callback = llama_log_callback_default; + void * log_callback_user_data = nullptr; +}; + +static llama_logger_state g_logger_state; + +time_meas::time_meas(int64_t & t_acc, bool disable) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {} + +time_meas::~time_meas() { + if (t_start_us >= 0) { + t_acc += ggml_time_us() - t_start_us; + } + } + +void llama_log_set(ggml_log_callback log_callback, void * user_data) { + ggml_log_set(log_callback, user_data); + g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default; + g_logger_state.log_callback_user_data = user_data; +} + +static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) { + va_list args_copy; + va_copy(args_copy, args); + char buffer[128]; + int len = vsnprintf(buffer, 128, format, args); + if (len < 128) { + g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data); + } else { + char * buffer2 = new char[len + 1]; + vsnprintf(buffer2, len + 1, format, args_copy); + buffer2[len] = 0; + g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data); + delete[] buffer2; + } + va_end(args_copy); +} + +void llama_log_internal(ggml_log_level level, const char * format, ...) { + va_list args; + va_start(args, format); + llama_log_internal_v(level, format, args); + va_end(args); +} + +void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) { + (void) level; + (void) user_data; + fputs(text, stderr); + fflush(stderr); +} + +void replace_all(std::string & s, const std::string & search, const std::string & replace) { + if (search.empty()) { + return; + } + std::string builder; + builder.reserve(s.length()); + size_t pos = 0; + size_t last_pos = 0; + while ((pos = s.find(search, last_pos)) != std::string::npos) { + builder.append(s, last_pos, pos - last_pos); + builder.append(replace); + last_pos = pos + search.length(); + } + builder.append(s, last_pos, std::string::npos); + s = std::move(builder); +} + +std::string format(const char * fmt, ...) 
{ + va_list ap; + va_list ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + int size = vsnprintf(NULL, 0, fmt, ap); + GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT + std::vector buf(size + 1); + int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); + GGML_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + return std::string(buf.data(), size); +} + +std::string llama_format_tensor_shape(const std::vector & ne) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0)); + for (size_t i = 1; i < ne.size(); i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i)); + } + return buf; +} + +std::string llama_format_tensor_shape(const struct ggml_tensor * t) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]); + } + return buf; +} + +static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) { + switch (type) { + case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]); + case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]); + case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]); + case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]); + case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]); + case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]); + case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]); + case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]); + case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]); + case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]); + case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false"; + default: return format("unknown type %d", type); + } +} + +std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) { + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + switch (type) { + case GGUF_TYPE_STRING: + return gguf_get_val_str(ctx_gguf, i); + case GGUF_TYPE_ARRAY: + { + const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i); + int arr_n = gguf_get_arr_n(ctx_gguf, i); + const void * data = gguf_get_arr_data(ctx_gguf, i); + std::stringstream ss; + ss << "["; + for (int j = 0; j < arr_n; j++) { + if (arr_type == GGUF_TYPE_STRING) { + std::string val = gguf_get_arr_str(ctx_gguf, i, j); + // escape quotes + replace_all(val, "\\", "\\\\"); + replace_all(val, "\"", "\\\""); + ss << '"' << val << '"'; + } else if (arr_type == GGUF_TYPE_ARRAY) { + ss << "???"; + } else { + ss << gguf_data_to_str(arr_type, data, j); + } + if (j < arr_n - 1) { + ss << ", "; + } + } + ss << "]"; + return ss.str(); + } + default: + return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0); + } +} diff --git a/src/llama-impl.h b/src/llama-impl.h index 70f16b61c..12d1fb082 100644 --- a/src/llama-impl.h +++ b/src/llama-impl.h @@ -1,10 +1,9 @@ #pragma once -#include "llama.h" +#include "ggml.h" // for ggml_log_level #include #include -#include #ifdef __GNUC__ #ifdef __MINGW32__ @@ -35,147 +34,28 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void * // helpers // -struct time_meas { - time_meas(int64_t & t_acc, bool disable = false) : t_start_us(disable ? 
-1 : ggml_time_us()), t_acc(t_acc) {} +template +struct no_init { + T value; + no_init() { /* do nothing */ } +}; - ~time_meas() { - if (t_start_us >= 0) { - t_acc += ggml_time_us() - t_start_us; - } - } +struct time_meas { + time_meas(int64_t & t_acc, bool disable = false); + ~time_meas(); const int64_t t_start_us; int64_t & t_acc; }; -static void replace_all(std::string & s, const std::string & search, const std::string & replace) { - if (search.empty()) { - return; - } - std::string builder; - builder.reserve(s.length()); - size_t pos = 0; - size_t last_pos = 0; - while ((pos = s.find(search, last_pos)) != std::string::npos) { - builder.append(s, last_pos, pos - last_pos); - builder.append(replace); - last_pos = pos + search.length(); - } - builder.append(s, last_pos, std::string::npos); - s = std::move(builder); -} +void replace_all(std::string & s, const std::string & search, const std::string & replace); -const std::vector> & llama_internal_get_tensor_map( - struct llama_context * ctx -); +// TODO: rename to llama_format ? +LLAMA_ATTRIBUTE_FORMAT(1, 2) +std::string format(const char * fmt, ...); -// the ring buffer works similarly to std::deque, but with a fixed capacity -template -struct ring_buffer { - ring_buffer(size_t cap) : capacity(cap), data(cap) {} +std::string llama_format_tensor_shape(const std::vector & ne); +std::string llama_format_tensor_shape(const struct ggml_tensor * t); - T & front() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[first]; - } - - const T & front() const { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[first]; - } - - T & back() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[pos]; - } - - const T & back() const { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[pos]; - } - - void push_back(const T & value) { - if (capacity == 0) { - throw std::runtime_error("ring buffer: capacity is zero"); - } - - if (sz == capacity) { - // advance the start when buffer is full - first = (first + 1) % capacity; - } else { - sz++; - } - data[pos] = value; - pos = (pos + 1) % capacity; - } - - T pop_front() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - T value = data[first]; - first = (first + 1) % capacity; - sz--; - return value; - } - - //T & operator[](size_t i) { - // if (i >= sz) { - // throw std::runtime_error("ring buffer: index out of bounds"); - // } - // return data[(first + i) % capacity]; - //} - - //const T & at(size_t i) const { - // if (i >= sz) { - // throw std::runtime_error("ring buffer: index out of bounds"); - // } - // return data[(first + i) % capacity]; - //} - - const T & rat(size_t i) const { - if (i >= sz) { - throw std::runtime_error("ring buffer: index out of bounds"); - } - return data[(first + sz - i - 1) % capacity]; - } - - std::vector to_vector() const { - std::vector result; - result.reserve(sz); - for (size_t i = 0; i < sz; i++) { - result.push_back(data[(first + i) % capacity]); - } - return result; - } - - void clear() { - // here only reset the status of the buffer - sz = 0; - first = 0; - pos = 0; - } - - bool empty() const { - return sz == 0; - } - - size_t size() const { - return sz; - } - - size_t capacity = 0; - size_t sz = 0; - size_t first = 0; - size_t pos = 0; - std::vector data; -}; +std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i); diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp new file mode 
100644 index 000000000..53379253a --- /dev/null +++ b/src/llama-kv-cache.cpp @@ -0,0 +1,718 @@ +#include "llama-kv-cache.h" + +#include "llama-impl.h" +#include "llama-batch.h" +#include "llama-cparams.h" +#include "llama-model.h" + +#include +#include +#include + +static const llama_kv_cache_slot_info llama_kv_cache_slot_info_failed{false}; + +uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) { + // the FA kernels require padding to avoid extra runtime boundary checks + return cparams.flash_attn ? 256u : 32u; +} + +bool llama_kv_cache_init( + struct llama_kv_cache & cache, + const llama_model & model, + const llama_cparams & cparams, + ggml_type type_k, + ggml_type type_v, + uint32_t kv_size, + bool offload) { + const struct llama_hparams & hparams = model.hparams; + + const int32_t n_layer = hparams.n_layer; + + cache.has_shift = false; + + cache.recurrent = llama_model_is_recurrent(&model); + cache.v_trans = !cache.recurrent && !cparams.flash_attn; + cache.can_shift = !cache.recurrent && model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA + + LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d\n", + __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer, cache.can_shift); + + cache.head = 0; + cache.size = kv_size; + cache.used = 0; + + cache.type_k = type_k; + cache.type_v = type_v; + + cache.cells.clear(); + cache.cells.resize(kv_size); + + // create a context for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + struct ggml_init_params params = { + /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + ggml_context * ctx = ggml_init(params); + if (!ctx) { + return nullptr; + } + ctx_map[buft] = ctx; + cache.ctxs.emplace_back(ctx); + return ctx; + } + return it->second; + }; + + cache.k_l.reserve(n_layer); + cache.v_l.reserve(n_layer); + + for (int i = 0; i < n_layer; i++) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(); + + LLAMA_LOG_DEBUG("%s: layer %d: n_embd_k_gqa = %d, n_embd_v_gqa = %d\n", __func__, i, n_embd_k_gqa, n_embd_v_gqa); + + ggml_backend_buffer_type_t buft; + if (offload) { + auto * dev = model.dev_layer.at(i).dev; + buft = ggml_backend_dev_buffer_type(dev); + } else { + buft = ggml_backend_cpu_buffer_type(); + } + ggml_context * ctx = ctx_for_buft(buft); + + if (!ctx) { + LLAMA_LOG_ERROR("%s: failed to create ggml context for kv cache\n", __func__); + return false; + } + + ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size); + ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size); + ggml_format_name(k, "cache_k_l%d", i); + ggml_format_name(v, "cache_v_l%d", i); + cache.k_l.push_back(k); + cache.v_l.push_back(v); + } + + // allocate tensors and initialize the buffers to avoid NaNs in the padding + for (auto it : ctx_map) { + auto * buft = it.first; + auto * ctx = it.second; + + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__); + return false; + } + ggml_backend_buffer_clear(buf, 0); + LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), 
ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + cache.bufs.emplace_back(buf); + } + + return true; +} + +struct llama_kv_cache_slot_info llama_kv_cache_find_slot( + struct llama_kv_cache & cache, + const struct llama_ubatch & batch) { + const uint32_t n_tokens = batch.n_tokens; + const uint32_t n_seqs = batch.n_seqs; + const uint32_t n_seq_tokens = batch.n_seq_tokens; + + if (cache.recurrent) { + // For recurrent state architectures (like Mamba or RWKV), + // each cache cell can store the state for a whole sequence. + // A slot should be always be contiguous. + + // can only process batches with an equal number of new tokens in each sequence + GGML_ASSERT(batch.equal_seqs); + + int32_t min = cache.size - 1; + int32_t max = 0; + + // everything should fit if all seq_ids are smaller than the max + for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t n_seq_id = batch.n_seq_id[s]; + for (uint32_t j = 0; j < n_seq_id; ++j) { + const llama_seq_id seq_id = batch.seq_id[s][j]; + + if (seq_id < 0 || (uint32_t) seq_id >= cache.size) { + // too big seq_id + // TODO: would it be possible to resize the cache instead? + LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, cache.size); + return llama_kv_cache_slot_info_failed; + } + if (j > 0) { + llama_kv_cell & seq = cache.cells[seq_id]; + if (seq.tail >= 0) { + llama_kv_cell & cell = cache.cells[seq.tail]; + // clear cells from seq_ids that become shared + // (should not normally happen, but let's handle it anyway) + cell.seq_id.erase(seq_id); + seq.tail = -1; + if (cell.seq_id.empty()) { + cell.pos = -1; + cell.src = -1; + cache.used -= 1; + } + } + } + } + } + +#ifndef NDEBUG + { + std::vector tails_verif; + tails_verif.assign(cache.size, -1); + for (uint32_t i = 0; i < cache.size; ++i) { + llama_kv_cell & cell = cache.cells[i]; + for (llama_seq_id seq_id : cell.seq_id) { + if (tails_verif[seq_id] != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); + } + tails_verif[seq_id] = i; + } + } + for (uint32_t i = 0; i < cache.size; ++i) { + if (tails_verif[i] != cache.cells[i].tail) { + LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cache.cells[i].tail, tails_verif[i]); + } + } + } +#endif + + // find next empty cell + uint32_t next_empty_cell = cache.head; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; } + llama_kv_cell & cell = cache.cells[next_empty_cell]; + if (cell.is_empty()) { break; } + next_empty_cell += 1; + } + + // find usable cell range + for (uint32_t s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = batch.seq_id[s][0]; + llama_kv_cell & seq_meta = cache.cells[seq_id]; + bool has_cell = false; + if (seq_meta.tail >= 0) { + llama_kv_cell & cell = cache.cells[seq_meta.tail]; + GGML_ASSERT(cell.has_seq_id(seq_id)); + // does this seq_id "own" the cell? 
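// [illustrative sketch, not part of this patch] For recurrent models each
// sequence keeps its entire state in a single cell, and cells[seq_id].tail
// records which one (-1 means the sequence has no state yet). A hypothetical
// standalone helper that resolves a sequence to its state cell:
static int32_t example_state_cell_for_seq(const struct llama_kv_cache & cache, llama_seq_id seq_id) {
    if (seq_id < 0 || (uint32_t) seq_id >= cache.size) {
        return -1; // out-of-range seq_ids cannot have a state cell
    }
    return cache.cells[seq_id].tail;
}
// With that in mind, the check that follows treats the tail cell as "owned"
// only when this seq_id is its sole member: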
+ if (cell.seq_id.size() == 1) { has_cell = true; } + } + if (!has_cell) { + llama_kv_cell & empty_cell = cache.cells[next_empty_cell]; + GGML_ASSERT(empty_cell.is_empty()); + // copy old tail into the empty cell + if (seq_meta.tail >= 0) { + llama_kv_cell & orig_cell = cache.cells[seq_meta.tail]; + empty_cell.pos = orig_cell.pos; + empty_cell.src = orig_cell.src; + orig_cell.seq_id.erase(seq_id); + empty_cell.seq_id.insert(seq_id); // will be overwritten + } + seq_meta.tail = next_empty_cell; + // find next empty cell + if (s + 1 < n_seqs) { + next_empty_cell += 1; + for (uint32_t i = 0; i < cache.size; ++i) { + if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; } + llama_kv_cell & cell = cache.cells[next_empty_cell]; + if (cell.is_empty()) { break; } + next_empty_cell += 1; + } + } + } + if (min > seq_meta.tail) { min = seq_meta.tail; } + if (max < seq_meta.tail) { max = seq_meta.tail; } + } + + // gather and re-order + for (uint32_t s = 0; s < n_seqs; ++s) { + int32_t dst_id = s + min; + int32_t src_id = cache.cells[batch.seq_id[s][0]].tail; + if (dst_id != src_id) { + llama_kv_cell & dst_cell = cache.cells[dst_id]; + llama_kv_cell & src_cell = cache.cells[src_id]; + + std::swap(dst_cell.pos, src_cell.pos); + std::swap(dst_cell.src, src_cell.src); + std::swap(dst_cell.seq_id, src_cell.seq_id); + + // swap tails (assuming they NEVER overlap) + for (const llama_seq_id seq_id : src_cell.seq_id) { + cache.cells[seq_id].tail = src_id; + } + for (const llama_seq_id seq_id : dst_cell.seq_id) { + cache.cells[seq_id].tail = dst_id; + } + } + } + + // update the pos of the used seqs + for (uint32_t s = 0; s < n_seqs; ++s) { + const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1]; + int32_t cell_id = s + min; + llama_kv_cell & cell = cache.cells[cell_id]; + + if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { + // What should happen when the pos backtracks or skips a value? + // Clearing the state mid-batch would require special-casing which isn't done. + LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", + __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens); + } + cell.pos = last_pos; + cell.seq_id.clear(); + for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) { + const llama_seq_id seq_id = batch.seq_id[s][j]; + cell.seq_id.insert(seq_id); + cache.cells[seq_id].tail = cell_id; + } + } + + // allow getting the range of used cells, from head to head + n + cache.head = min; + cache.n = max - min + 1; + cache.used = std::count_if(cache.cells.begin(), cache.cells.end(), + [](const llama_kv_cell& cell){ return !cell.is_empty(); }); + + // sanity check + return llama_kv_cache_slot_info(cache.n >= n_seqs); + } + // otherwise, one cell per token. 
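// [illustrative sketch, not part of this patch] The code below handles the
// non-recurrent case: it scans the ring for a contiguous run of n_tokens free
// cells (pos < 0), wrapping back to the start at most once before giving up.
// A simplified standalone version of the same search, with the head/used
// bookkeeping of the real code left out (the helper name is hypothetical):
static int32_t example_find_free_run(const std::vector<llama_kv_cell> & cells, uint32_t head, uint32_t n_want) {
    const uint32_t size = (uint32_t) cells.size();
    if (n_want > size) {
        return -1;
    }
    uint32_t start    = head;
    uint32_t n_tested = 0;
    while (true) {
        if (start + n_want > size) {
            // not enough room before the end of the ring, wrap around
            n_tested += size - start;
            start     = 0;
            continue;
        }
        bool found = true;
        for (uint32_t i = 0; i < n_want; ++i) {
            if (cells[start + i].pos >= 0) {
                // an occupied cell breaks the run, resume the search after it
                found     = false;
                start    += i + 1;
                n_tested += i + 1;
                break;
            }
        }
        if (found) {
            return (int32_t) start;
        }
        if (n_tested >= size) {
            return -1; // no contiguous run of n_want free cells exists
        }
    }
}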
+ + if (n_tokens > cache.size) { + LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size); + return llama_kv_cache_slot_info_failed; + } + + uint32_t n_tested = 0; + + while (true) { + if (cache.head + n_tokens > cache.size) { + n_tested += cache.size - cache.head; + cache.head = 0; + continue; + } + + bool found = true; + for (uint32_t i = 0; i < n_tokens; i++) { + if (cache.cells[cache.head + i].pos >= 0) { + found = false; + cache.head += i + 1; + n_tested += i + 1; + break; + } + } + + if (found) { + break; + } + + if (n_tested >= cache.size) { + //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens); + return llama_kv_cache_slot_info_failed; + } + } + + for (uint32_t s = 0; s < n_seqs; s++) { + for (uint32_t i = 0; i < n_seq_tokens; ++i) { + uint32_t k = s*n_seq_tokens + i; + cache.cells[cache.head + k].pos = batch.pos[k]; + + for (int32_t j = 0; j < batch.n_seq_id[s]; j++) { + cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]); + } + } + } + + cache.used += n_tokens; + + return llama_kv_cache_slot_info(cache.head, cache.head + n_tokens); +} + +uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) { + for (uint32_t i = cache.size; i > 0; --i) { + const llama_kv_cell & cell = cache.cells[i - 1]; + + if (cell.pos >= 0 && !cell.is_empty()) { + return i; + } + } + + return 0; +} + +void llama_kv_cache_clear(struct llama_kv_cache & cache) { + for (int32_t i = 0; i < (int32_t) cache.size; ++i) { + cache.cells[i].pos = -1; + cache.cells[i].seq_id.clear(); + cache.cells[i].src = -1; + cache.cells[i].tail = -1; + } + cache.head = 0; + cache.used = 0; + + for (auto & buf : cache.bufs) { + ggml_backend_buffer_clear(buf.get(), 0); + } +} + +bool llama_kv_cache_seq_rm( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1) { + uint32_t new_head = cache.size; + + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + + // models like Mamba or RWKV can't have a state partially erased + if (cache.recurrent) { + if (seq_id >= (int64_t) cache.size) { + // could be fatal + return false; + } + if (0 <= seq_id) { + int32_t & tail_id = cache.cells[seq_id].tail; + if (tail_id >= 0) { + const llama_kv_cell & cell = cache.cells[tail_id]; + // partial intersection is invalid + if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { + return false; + } + // invalidate tails which will be cleared + if (p0 <= cell.pos && cell.pos < p1) { + tail_id = -1; + } + } + } else { + // seq_id is negative, then the range should include everything or nothing + if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) { + return false; + } + } + } + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + if (seq_id < 0) { + cache.cells[i].seq_id.clear(); + } else if (cache.cells[i].has_seq_id(seq_id)) { + cache.cells[i].seq_id.erase(seq_id); + } else { + continue; + } + if (cache.cells[i].is_empty()) { + // keep count of the number of used cells + if (cache.cells[i].pos >= 0) cache.used--; + + cache.cells[i].pos = -1; + cache.cells[i].src = -1; + if (new_head == cache.size) new_head = i; + } + } + } + + // If we freed up a slot, set head to it so searching can start there. 
+ if (new_head != cache.size && new_head < cache.head) cache.head = new_head; + + return true; +} + +void llama_kv_cache_seq_cp( + struct llama_kv_cache & cache, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1) { + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + + if (cache.recurrent) { + if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) { + llama_kv_cell & tail_src = cache.cells[seq_id_src]; + llama_kv_cell & tail_dst = cache.cells[seq_id_dst]; + if (tail_dst.tail >= 0) { + // clear destination seq_id if it wasn't empty + llama_kv_cell & cell_dst = cache.cells[tail_dst.tail]; + + cell_dst.seq_id.erase(seq_id_dst); + tail_dst.tail = -1; + if (cell_dst.seq_id.empty()) { + cell_dst.pos = -1; + cell_dst.delta = -1; + cell_dst.src = -1; + cache.used -= 1; + } + } + if (tail_src.tail >= 0) { + llama_kv_cell & cell_src = cache.cells[tail_src.tail]; + + cell_src.seq_id.insert(seq_id_dst); + tail_dst.tail = tail_src.tail; + } + } + + return; + } + // otherwise, this is the KV cache of a Transformer-like model + + cache.head = 0; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + cache.cells[i].seq_id.insert(seq_id_dst); + } + } +} + +void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) { + uint32_t new_head = cache.size; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.recurrent && (llama_seq_id) i != seq_id) { + cache.cells[i].tail = -1; + } + if (!cache.cells[i].has_seq_id(seq_id)) { + if (cache.cells[i].pos >= 0) cache.used--; + cache.cells[i].pos = -1; + cache.cells[i].src = -1; + cache.cells[i].seq_id.clear(); + if (new_head == cache.size) new_head = i; + } else { + cache.cells[i].seq_id.clear(); + cache.cells[i].seq_id.insert(seq_id); + } + } + + // If we freed up a slot, set head to it so searching can start there. + if (new_head != cache.size && new_head < cache.head) cache.head = new_head; +} + +void llama_kv_cache_seq_add( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta) { + uint32_t new_head = cache.size; + + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + // If there is no range then return early to avoid looping over the cache. + if (p0 == p1) return; + + if (cache.recurrent) { + // for Mamba-like or RWKV models, only the pos needs to be shifted + if (0 <= seq_id && seq_id < (int64_t) cache.size) { + const int32_t tail_id = cache.cells[seq_id].tail; + if (tail_id >= 0) { + llama_kv_cell & cell = cache.cells[tail_id]; + if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { + cell.pos += delta; + } + } + } + return; + } + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + cache.has_shift = true; + cache.cells[i].pos += delta; + cache.cells[i].delta += delta; + + if (cache.cells[i].pos < 0) { + if (!cache.cells[i].is_empty()) { + cache.used--; + } + cache.cells[i].pos = -1; + cache.cells[i].seq_id.clear(); + if (new_head == cache.size) { + new_head = i; + } + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + // Otherwise we just start the next search from the beginning. + cache.head = new_head != cache.size ? 
new_head : 0; +} + +void llama_kv_cache_seq_div( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d) { + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + // If there is no range then return early to avoid looping over the cache. + if (p0 == p1) return; + + if (cache.recurrent) { + // for Mamba-like or RWKV models, only the pos needs to be changed + if (0 <= seq_id && seq_id < (int64_t) cache.size) { + const int32_t tail_id = cache.cells[seq_id].tail; + if (tail_id >= 0) { + llama_kv_cell & cell = cache.cells[tail_id]; + if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { + cell.pos /= d; + } + } + } + return; + } + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + cache.has_shift = true; + + { + llama_pos p_old = cache.cells[i].pos; + cache.cells[i].pos /= d; + cache.cells[i].delta += cache.cells[i].pos - p_old; + } + } + } +} + +llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) { + llama_pos result = 0; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id)) { + result = std::max(result, cache.cells[i].pos); + } + } + + return result; +} + +void llama_kv_cache_defrag(struct llama_kv_cache & cache) { + if (!cache.recurrent) { + cache.do_defrag = true; + } +} + +int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv) { + int result = 0; + + for (uint32_t i = 0; i < kv.size; i++) { + result += kv.cells[i].seq_id.size(); + } + + return result; +} + +int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv) { + return kv.used; +} + +bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv) { + return kv.can_shift; +} + +// +// kv cache view +// + +struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max) { + struct llama_kv_cache_view result = { + /*.n_cells = */ 0, + /*.n_seq_max = */ n_seq_max, + /*.token_count = */ 0, + /*.used_cells = */ llama_get_kv_cache_used_cells(kv), + /*.max_contiguous = */ 0, + /*.max_contiguous_idx = */ -1, + /*.cells = */ nullptr, + /*.cells_sequences = */ nullptr, + }; + + return result; +} + +void llama_kv_cache_view_free(struct llama_kv_cache_view * view) { + if (view->cells != nullptr) { + free(view->cells); + view->cells = nullptr; + } + if (view->cells_sequences != nullptr) { + free(view->cells_sequences); + view->cells_sequences = nullptr; + } +} + +void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv) { + if (uint32_t(view->n_cells) < kv.size || view->cells == nullptr) { + view->n_cells = int32_t(kv.size); + void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells); + GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells"); + view->cells = (struct llama_kv_cache_view_cell *)p; + p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells); + GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences"); + view->cells_sequences = (llama_seq_id *)p; + } + + const std::vector & kv_cells = kv.cells; + llama_kv_cache_view_cell * c_curr = view->cells; + llama_seq_id * cs_curr = view->cells_sequences; + int32_t used_cells = 0; + int32_t token_count = 0; + int32_t curr_contig_idx = -1; + uint32_t max_contig = 0; + int32_t max_contig_idx = -1; + + for (int32_t i = 0; i < int32_t(kv.size); i++, c_curr++, 
cs_curr += view->n_seq_max) { + const size_t curr_size = kv_cells[i].seq_id.size(); + token_count += curr_size; + c_curr->pos = kv_cells[i].pos + kv_cells[i].delta; + + if (curr_size > 0) { + if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) { + max_contig = i - curr_contig_idx; + max_contig_idx = curr_contig_idx; + } + curr_contig_idx = -1; + } else if (curr_contig_idx < 0) { + curr_contig_idx = i; + } + + int seq_idx = 0; + for (const llama_seq_id it : kv_cells[i].seq_id) { + if (seq_idx >= view->n_seq_max) { + break; + } + cs_curr[seq_idx] = it; + seq_idx++; + } + if (seq_idx != 0) { + used_cells++; + } + for (; seq_idx < view->n_seq_max; seq_idx++) { + cs_curr[seq_idx] = -1; + } + } + if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) { + max_contig_idx = curr_contig_idx; + max_contig = kv_cells.size() - curr_contig_idx; + } + view->max_contiguous = max_contig; + view->max_contiguous_idx = max_contig_idx; + view->token_count = token_count; + view->used_cells = used_cells; + if (uint32_t(used_cells) != kv.used) { + LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n", + __func__, kv.used, used_cells); + } +} diff --git a/src/llama-kv-cache.h b/src/llama-kv-cache.h new file mode 100644 index 000000000..dca6f3998 --- /dev/null +++ b/src/llama-kv-cache.h @@ -0,0 +1,218 @@ +#pragma once + +#include "llama.h" + +#include "ggml-cpp.h" + +#include +#include + +struct llama_kv_cell { + llama_pos pos = -1; + llama_pos delta = 0; + int32_t src = -1; // used by recurrent state models to copy states + int32_t tail = -1; + + std::set seq_id; + + bool has_seq_id(const llama_seq_id & id) const { + return seq_id.find(id) != seq_id.end(); + } + + bool is_empty() const { + return seq_id.empty(); + } + + bool is_same_seq(const llama_kv_cell & other) const { + return seq_id == other.seq_id; + } +}; + +// ring-buffer of cached KV data +struct llama_kv_cache { + bool has_shift = false; + bool do_defrag = false; + bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token + bool v_trans = true; // the value tensor is transposed + bool can_shift = false; + + // Note: The value of head isn't only used to optimize searching + // for a free KV slot. llama_decode_internal also uses it, so it + // cannot be freely changed after a slot has been allocated. + uint32_t head = 0; + uint32_t size = 0; + uint32_t used = 0; // used cells (i.e. 
at least one seq_id) + + // computed before each graph build + uint32_t n = 0; + + ggml_type type_k = GGML_TYPE_F16; + ggml_type type_v = GGML_TYPE_F16; + + std::vector cells; + + std::vector k_l; // per layer + std::vector v_l; + + std::vector ctxs; + std::vector bufs; + + size_t total_size() const { + size_t size = 0; + for (const auto & buf : bufs) { + size += ggml_backend_buffer_get_size(buf.get()); + } + + return size; + } + + // TODO: better data structures to reduce the cost of this operation + llama_pos max_pos() const { + llama_pos max_pos = -1; + for (const auto & cell : cells) { + max_pos = std::max(max_pos, cell.pos); + } + + return max_pos; + } +}; + +// a structure holds information about the slot found in llama_kv_cache_find_slot +struct llama_kv_cache_slot_info { + std::pair boundaries; // slot boundaries [begin, end) + bool found = false; // the slot was found + + explicit llama_kv_cache_slot_info(bool found_) : found{found_} {} + llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {} + + operator bool() const { return found; } +}; + +// TODO: maybe not needed +uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams); + +bool llama_kv_cache_init( + struct llama_kv_cache & cache, + const llama_model & model, + const llama_cparams & cparams, + ggml_type type_k, + ggml_type type_v, + uint32_t kv_size, + bool offload); + +// find an empty slot of size "n_tokens" in the cache +// updates the cache head +// returns a structure holding information about the slot found +// Note: On success, it's important that cache.head points +// to the first cell of the slot. +struct llama_kv_cache_slot_info llama_kv_cache_find_slot( + struct llama_kv_cache & cache, + const struct llama_ubatch & batch); + +// find how many cells are currently in use +uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache); + +void llama_kv_cache_clear(struct llama_kv_cache & cache); + +bool llama_kv_cache_seq_rm( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1); + +void llama_kv_cache_seq_cp( + struct llama_kv_cache & cache, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1); + +void llama_kv_cache_seq_keep( + struct llama_kv_cache & cache, + llama_seq_id seq_id); + +void llama_kv_cache_seq_add( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta); + +void llama_kv_cache_seq_div( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d); + +llama_pos llama_kv_cache_seq_pos_max( + struct llama_kv_cache & cache, + llama_seq_id seq_id); + +void llama_kv_cache_defrag(struct llama_kv_cache & cache); + +int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv); + +int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv); + +bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv); + +// +// kv cache view +// + +struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max); + +void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv); + +// +// kv cache restore +// + +// saves the kv_cache state for future recovery. +// used to rollback llama_kv_cache_find_slot changes. 
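+//
+// rough call-site sketch (editor's illustration, not part of this patch; `kv_self`
+// and `ubatch` stand in for the caller's cache and micro-batch):
+//
+//     llama_kv_slot_restorer restorer(kv_self);
+//
+//     const auto slot = llama_kv_cache_find_slot(kv_self, ubatch);
+//     if (!slot) {
+//         return 1; // nothing was modified, no rollback needed
+//     }
+//     restorer.save(slot);
+//
+//     // ... if graph build/compute fails afterwards ...
+//     restorer.restore(kv_self); // releases the cells claimed by find_slot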
+struct llama_kv_slot_restorer { + struct llama_kv_cache_state { + uint32_t head = 0; + uint32_t n = 0; + } old_state; + + // for non-recurrent models only + // list of slots to restore + std::vector> slot_boundaries; + + bool do_restore = false; + + explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) { + old_state.head = cache.head; + old_state.n = cache.n; + } + + // saves a slot information for future restoration + void save(const struct llama_kv_cache_slot_info & slot) { + if (slot) { + do_restore = true; + if (slot.boundaries.first != slot.boundaries.second) { + slot_boundaries.push_back(slot.boundaries); + } + } + } + + // must be explicitly called to restore the kv_cache state + // and rollback changes from all llama_kv_cache_find_slot calls + void restore(struct llama_kv_cache & cache) { + if (do_restore) { + cache.head = old_state.head; + cache.n = old_state.n; + + if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased + llama_kv_cache_seq_rm(cache, -1, -1, -1); + } else { + for (auto & slot : slot_boundaries) { + llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second); + } + } + } + } +}; + diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp new file mode 100644 index 000000000..a99326335 --- /dev/null +++ b/src/llama-mmap.cpp @@ -0,0 +1,585 @@ +#include "llama-mmap.h" + +#include "llama-impl.h" + +#include "ggml.h" + +#include +#include +#include + +#ifdef __has_include + #if __has_include() + #include + #if defined(_POSIX_MAPPED_FILES) + #include + #include + #endif + #if defined(_POSIX_MEMLOCK_RANGE) + #include + #endif + #endif +#endif + +#if defined(_WIN32) + #define WIN32_LEAN_AND_MEAN + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #ifndef PATH_MAX + #define PATH_MAX MAX_PATH + #endif + #include +#endif + +// TODO: consider moving to llama-impl.h if needed in more places +#if defined(_WIN32) +std::string llama_format_win_err(DWORD err) { + LPSTR buf; + size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); + if (!size) { + return "FormatMessageA failed"; + } + std::string ret(buf, size); + LocalFree(buf); + return ret; +} +#endif + +// llama_file + +struct llama_file::impl { +#if defined(_WIN32) + HANDLE fp_win32; + std::string GetErrorMessageWin32(DWORD error_code) const { + std::string ret; + LPSTR lpMsgBuf = NULL; + DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL); + if (!bufLen) { + ret = format("Win32 error code: %lx", error_code); + } else { + ret = lpMsgBuf; + LocalFree(lpMsgBuf); + } + + return ret; + } + + impl(const char * fname, const char * mode) { + fp = ggml_fopen(fname, mode); + if (fp == NULL) { + throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); + } + fp_win32 = (HANDLE) _get_osfhandle(_fileno(fp)); + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } + + size_t tell() const { + LARGE_INTEGER li; + li.QuadPart = 0; + BOOL ret = SetFilePointerEx(fp_win32, li, &li, FILE_CURRENT); + if (!ret) { + throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + + return li.QuadPart; + } + + void seek(size_t offset, int whence) const { + static_assert(SEEK_SET == FILE_BEGIN, "SEEK_SET != FILE_BEGIN"); + 
static_assert(SEEK_CUR == FILE_CURRENT, "SEEK_CUR != FILE_CURRENT"); + static_assert(SEEK_END == FILE_END, "SEEK_END != FILE_END"); + + LARGE_INTEGER li; + li.QuadPart = offset; + BOOL ret = SetFilePointerEx(fp_win32, li, NULL, whence); + if (!ret) { + throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + } + + void read_raw(void * ptr, size_t len) const { + size_t bytes_read = 0; + while (bytes_read < len) { + size_t chunk_size = std::min(len - bytes_read, 64*1024*1024); + DWORD chunk_read = 0; + BOOL result = ReadFile(fp_win32, reinterpret_cast(ptr) + bytes_read, chunk_size, &chunk_read, NULL); + if (!result) { + throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + if (chunk_read < chunk_size || chunk_read == 0) { + throw std::runtime_error("unexpectedly reached end of file"); + } + + bytes_read += chunk_read; + } + } + + uint32_t read_u32() const { + uint32_t val; + read_raw(&val, sizeof(val)); + return val; + } + + void write_raw(const void * ptr, size_t len) const { + size_t bytes_written = 0; + while (bytes_written < len) { + size_t chunk_size = std::min(len - bytes_written, 64*1024*1024); + DWORD chunk_written = 0; + BOOL result = WriteFile(fp_win32, reinterpret_cast(ptr) + bytes_written, chunk_size, &chunk_written, NULL); + if (!result) { + throw std::runtime_error(format("write error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + if (chunk_written < chunk_size || chunk_written == 0) { + throw std::runtime_error("unexpectedly failed to write bytes"); + } + + bytes_written += chunk_written; + } + } + + void write_u32(uint32_t val) const { + write_raw(&val, sizeof(val)); + } + + ~impl() { + if (fp) { + std::fclose(fp); + } + } +#else + impl(const char * fname, const char * mode) { + fp = ggml_fopen(fname, mode); + if (fp == NULL) { + throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); + } + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } + + size_t tell() const { +// TODO: this ifdef is never true? +#ifdef _WIN32 + __int64 ret = _ftelli64(fp); +#else + long ret = std::ftell(fp); +#endif + if (ret == -1) { + throw std::runtime_error(format("ftell error: %s", strerror(errno))); + } + + return (size_t) ret; + } + + void seek(size_t offset, int whence) const { +// TODO: this ifdef is never true? 
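+// (this is the non-Windows branch of llama_file::impl, so _WIN32 should not be
+// defined when this path is compiled)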
+#ifdef _WIN32 + int ret = _fseeki64(fp, (__int64) offset, whence); +#else + int ret = std::fseek(fp, (long) offset, whence); +#endif + if (ret != 0) { + throw std::runtime_error(format("seek error: %s", strerror(errno))); + } + } + + void read_raw(void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + std::size_t ret = std::fread(ptr, len, 1, fp); + if (ferror(fp)) { + throw std::runtime_error(format("read error: %s", strerror(errno))); + } + if (ret != 1) { + throw std::runtime_error("unexpectedly reached end of file"); + } + } + + uint32_t read_u32() const { + uint32_t ret; + read_raw(&ret, sizeof(ret)); + return ret; + } + + void write_raw(const void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + size_t ret = std::fwrite(ptr, len, 1, fp); + if (ret != 1) { + throw std::runtime_error(format("write error: %s", strerror(errno))); + } + } + + void write_u32(uint32_t val) const { + write_raw(&val, sizeof(val)); + } + + ~impl() { + if (fp) { + std::fclose(fp); + } + } +#endif + + FILE * fp; + size_t size; +}; + +llama_file::llama_file(const char * fname, const char * mode) : pimpl(std::make_unique(fname, mode)) {} +llama_file::~llama_file() = default; + +size_t llama_file::tell() const { return pimpl->tell(); } +size_t llama_file::size() const { return pimpl->size; } + +int llama_file::fileno() const { +#ifdef _WIN32 + return _fileno(pimpl->fp); +#else + return ::fileno(pimpl->fp); +#endif +} + +void llama_file::seek(size_t offset, int whence) const { pimpl->seek(offset, whence); } +void llama_file::read_raw(void * ptr, size_t len) const { pimpl->read_raw(ptr, len); } + +uint32_t llama_file::read_u32() const { return pimpl->read_u32(); } + +void llama_file::write_raw(const void * ptr, size_t len) const { pimpl->write_raw(ptr, len); } +void llama_file::write_u32(uint32_t val) const { pimpl->write_u32(val); } + +// llama_mmap + +struct llama_mmap::impl { +#ifdef _POSIX_MAPPED_FILES + std::vector> mapped_fragments; + + impl(struct llama_file * file, size_t prefetch, bool numa) { + size = file->size(); + int fd = file->fileno(); + int flags = MAP_SHARED; + if (numa) { prefetch = 0; } +#ifdef __linux__ + if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) { + LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n", + strerror(errno)); + } + if (prefetch) { flags |= MAP_POPULATE; } +#endif + addr = mmap(NULL, file->size(), PROT_READ, flags, fd, 0); + if (addr == MAP_FAILED) { + throw std::runtime_error(format("mmap failed: %s", strerror(errno))); + } + + if (prefetch > 0) { + if (posix_madvise(addr, std::min(file->size(), prefetch), POSIX_MADV_WILLNEED)) { + LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n", + strerror(errno)); + } + } + if (numa) { + if (posix_madvise(addr, file->size(), POSIX_MADV_RANDOM)) { + LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n", + strerror(errno)); + } + } + + mapped_fragments.emplace_back(0, file->size()); + } + + static void align_range(size_t * first, size_t * last, size_t page_size) { + size_t offset_in_page = *first & (page_size - 1); + size_t offset_to_page = offset_in_page == 0 ? 
0 : page_size - offset_in_page; + *first += offset_to_page; + + *last = *last & ~(page_size - 1); + + if (*last <= *first) { + *last = *first; + } + } + + void unmap_fragment(size_t first, size_t last) { + int page_size = sysconf(_SC_PAGESIZE); + align_range(&first, &last, page_size); + size_t len = last - first; + + if (len == 0) { + return; + } + + GGML_ASSERT(first % page_size == 0); + GGML_ASSERT(last % page_size == 0); + GGML_ASSERT(last > first); + + void * next_page_start = (uint8_t *) addr + first; + + if (munmap(next_page_start, len)) { + LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno)); + } + + std::vector> new_mapped_fragments; + for (const auto & frag : mapped_fragments) { + if (frag.first < first && frag.second > last) { + new_mapped_fragments.emplace_back(frag.first, first); + new_mapped_fragments.emplace_back(last, frag.second); + } else if (frag.first < first && frag.second > first) { + new_mapped_fragments.emplace_back(frag.first, first); + } else if (frag.first < last && frag.second > last) { + new_mapped_fragments.emplace_back(last, frag.second); + } else if (frag.first >= first && frag.second <= last) { + } else { + new_mapped_fragments.push_back(frag); + } + } + mapped_fragments = std::move(new_mapped_fragments); + } + + ~impl() { + for (const auto & frag : mapped_fragments) { + if (munmap((char *) addr + frag.first, frag.second - frag.first)) { + LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno)); + } + } + } +#elif defined(_WIN32) + impl(struct llama_file * file, size_t prefetch, bool numa) { + GGML_UNUSED(numa); + + size = file->size(); + + HANDLE hFile = (HANDLE) _get_osfhandle(file->fileno()); + + HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); + + if (hMapping == NULL) { + DWORD error = GetLastError(); + throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); + } + + addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + DWORD error = GetLastError(); + CloseHandle(hMapping); + + if (addr == NULL) { + throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); + } + + if (prefetch > 0) { +#if _WIN32_WINNT >= 0x602 + BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG); + HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll"); + + pPrefetchVirtualMemory = (decltype(pPrefetchVirtualMemory))(void *) GetProcAddress(hKernel32, "PrefetchVirtualMemory"); + + if (pPrefetchVirtualMemory) { + WIN32_MEMORY_RANGE_ENTRY range; + range.VirtualAddress = addr; + range.NumberOfBytes = (SIZE_T) std::min(size, prefetch); + if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { + LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + throw std::runtime_error("PrefetchVirtualMemory unavailable"); +#endif + } + } + + void unmap_fragment(size_t first, size_t last) { + GGML_UNUSED(first); + GGML_UNUSED(last); + } + + ~impl() { + if (!UnmapViewOfFile(addr)) { + LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + impl(struct llama_file * file, size_t prefetch, bool numa) { + GGML_UNUSED(file); + GGML_UNUSED(prefetch); + GGML_UNUSED(numa); + + throw std::runtime_error("mmap not supported"); + } + + void unmap_fragment(size_t first, size_t last) { + GGML_UNUSED(first); + GGML_UNUSED(last); + + throw std::runtime_error("mmap not supported"); + } +#endif + + 
void * addr; + size_t size; +}; + +llama_mmap::llama_mmap(struct llama_file * file, size_t prefetch, bool numa) : pimpl(std::make_unique(file, prefetch, numa)) {} +llama_mmap::~llama_mmap() = default; + +size_t llama_mmap::size() const { return pimpl->size; } +void * llama_mmap::addr() const { return pimpl->addr; } + +void llama_mmap::unmap_fragment(size_t first, size_t last) { pimpl->unmap_fragment(first, last); } + +#if defined(_POSIX_MEMLOCK_RANGE) || defined(_WIN32) +const bool llama_mmap::SUPPORTED = true; +#else +const bool llama_mmap::SUPPORTED = false; +#endif + +// llama_mlock + +struct llama_mlock::impl { +#ifdef _POSIX_MEMLOCK_RANGE + static size_t lock_granularity() { + return (size_t) sysconf(_SC_PAGESIZE); + } + + bool raw_lock(const void * addr, size_t size) const { + if (!mlock(addr, size)) { + return true; + } + +#ifdef __APPLE__ +#define MLOCK_SUGGESTION \ + "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ + "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n" +#else +#define MLOCK_SUGGESTION \ + "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n" +#endif + + char* errmsg = std::strerror(errno); + bool suggest = (errno == ENOMEM); + + struct rlimit lock_limit; + if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) { + suggest = false; + } + if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) { + suggest = false; + } + + LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", + size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); + return false; + } + + static void raw_unlock(void * addr, size_t size) { + if (munlock(addr, size)) { + LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno)); + } + } +#elif defined(_WIN32) + static size_t lock_granularity() { + SYSTEM_INFO si; + GetSystemInfo(&si); + return (size_t) si.dwPageSize; + } + + bool raw_lock(void * ptr, size_t len) const { + for (int tries = 1; ; tries++) { + if (VirtualLock(ptr, len)) { + return true; + } + if (tries == 2) { + LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", + len, size, llama_format_win_err(GetLastError()).c_str()); + return false; + } + + SIZE_T min_ws_size, max_ws_size; + if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { + LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + return false; + } + size_t increment = len + 1048576; + min_ws_size += increment; + max_ws_size += increment; + if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { + LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + return false; + } + } + } + + static void raw_unlock(void * ptr, size_t len) { + if (!VirtualUnlock(ptr, len)) { + LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + static size_t lock_granularity() { + return (size_t) 65536; + } + + bool raw_lock(const void * addr, size_t len) const { + LLAMA_LOG_WARN("warning: mlock not supported on this system\n"); + return false; + } + + static void raw_unlock(const void * addr, size_t len) {} +#endif + + impl() : addr(NULL), size(0), failed_already(false) {} + + void init(void * ptr) { + GGML_ASSERT(addr == NULL && size == 0); + addr = ptr; + } + + void 
grow_to(size_t target_size) { + GGML_ASSERT(addr); + if (failed_already) { + return; + } + size_t granularity = lock_granularity(); + target_size = (target_size + granularity - 1) & ~(granularity - 1); + if (target_size > size) { + if (raw_lock((uint8_t *) addr + size, target_size - size)) { + size = target_size; + } else { + failed_already = true; + } + } + } + + void * addr; + size_t size; + + bool failed_already; +}; + +llama_mlock::llama_mlock() : pimpl(std::make_unique()) {} +llama_mlock::~llama_mlock() = default; + +void llama_mlock::init(void * ptr) { pimpl->init(ptr); } +void llama_mlock::grow_to(size_t target_size) { pimpl->grow_to(target_size); } + +#if defined(_POSIX_MEMLOCK_RANGE) || defined(_WIN32) +const bool llama_mlock::SUPPORTED = true; +#else +const bool llama_mlock::SUPPORTED = false; +#endif + +size_t llama_path_max() { + return PATH_MAX; +} diff --git a/src/llama-mmap.h b/src/llama-mmap.h new file mode 100644 index 000000000..6bcddee8c --- /dev/null +++ b/src/llama-mmap.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include + +struct llama_file; +struct llama_mmap; +struct llama_mlock; + +using llama_files = std::vector>; +using llama_mmaps = std::vector>; +using llama_mlocks = std::vector>; + +struct llama_file { + llama_file(const char * fname, const char * mode); + ~llama_file(); + + size_t tell() const; + size_t size() const; + + int fileno() const; + + void seek(size_t offset, int whence) const; + + void read_raw(void * ptr, size_t len) const; + uint32_t read_u32() const; + + void write_raw(const void * ptr, size_t len) const; + void write_u32(uint32_t val) const; + +private: + struct impl; + std::unique_ptr pimpl; +}; + +struct llama_mmap { + llama_mmap(const llama_mmap &) = delete; + llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false); + ~llama_mmap(); + + size_t size() const; + void * addr() const; + + void unmap_fragment(size_t first, size_t last); + + static const bool SUPPORTED; + +private: + struct impl; + std::unique_ptr pimpl; +}; + +struct llama_mlock { + llama_mlock(); + ~llama_mlock(); + + void init(void * ptr); + void grow_to(size_t target_size); + + static const bool SUPPORTED; + +private: + struct impl; + std::unique_ptr pimpl; +}; + +size_t llama_path_max(); diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp new file mode 100644 index 000000000..7743b4652 --- /dev/null +++ b/src/llama-model-loader.cpp @@ -0,0 +1,1010 @@ +#include "llama-model-loader.h" + +#include "ggml.h" + +#include +#include +#include +#include + +const char * llama_file_version_name(llama_fver version) { + switch (version) { + case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)"; + case GGUF_FILE_VERSION_V2: return "GGUF V2"; + case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)"; + } + + return "unknown"; +} + +namespace GGUFMeta { + template + struct GKV_Base_Type { + static constexpr gguf_type gt = gt_; + + static T getter(const gguf_context * ctx, const int kid) { + return gfun(ctx, kid); + } + }; + + template struct GKV_Base; + + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + 
template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + + template<> struct GKV_Base { + static constexpr gguf_type gt = GGUF_TYPE_STRING; + + static std::string getter(const gguf_context * ctx, const int kid) { + return gguf_get_val_str(ctx, kid); + } + }; + + struct ArrayInfo { + const gguf_type gt; + const size_t length; + const void * data; + }; + + template<> struct GKV_Base { + public: + static constexpr gguf_type gt = GGUF_TYPE_ARRAY; + static ArrayInfo getter(const gguf_context *ctx, const int k) { + return ArrayInfo { + gguf_get_arr_type(ctx, k), + size_t(gguf_get_arr_n(ctx, k)), + gguf_get_arr_data(ctx, k), + }; + } + }; + + template + class GKV : public GKV_Base { + GKV() = delete; + + public: + static T get_kv(const gguf_context * ctx, const int k) { + const enum gguf_type kt = gguf_get_kv_type(ctx, k); + + if (kt != GKV::gt) { + throw std::runtime_error(format("key %s has wrong type %s but expected type %s", + gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt))); + } + return GKV::getter(ctx, k); + } + + static const char * override_type_to_str(const llama_model_kv_override_type ty) { + switch (ty) { + case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool"; + case LLAMA_KV_OVERRIDE_TYPE_INT: return "int"; + case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float"; + case LLAMA_KV_OVERRIDE_TYPE_STR: return "str"; + } + return "unknown"; + } + + static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) { + if (!ovrd) { return false; } + if (ovrd->tag == expected_type) { + LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ", + __func__, override_type_to_str(ovrd->tag), ovrd->key); + switch (ovrd->tag) { + case LLAMA_KV_OVERRIDE_TYPE_BOOL: { + LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false"); + } break; + case LLAMA_KV_OVERRIDE_TYPE_INT: { + LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64); + } break; + case LLAMA_KV_OVERRIDE_TYPE_FLOAT: { + LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64); + } break; + case LLAMA_KV_OVERRIDE_TYPE_STR: { + LLAMA_LOG_INFO("%s\n", ovrd->val_str); + } break; + default: + // Shouldn't be possible to end up here, but just in case... 
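+                        // (ovrd->tag was already checked against expected_type, which
+                        // is always one of the four tags handled above)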
+ throw std::runtime_error( + format("Unsupported attempt to override %s type for metadata key %s\n", + override_type_to_str(ovrd->tag), ovrd->key)); + } + return true; + } + LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n", + __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag)); + return false; + } + + template + static typename std::enable_if::value, bool>::type + try_override(OT & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) { + target = ovrd->val_bool; + return true; + } + return false; + } + + template + static typename std::enable_if::value && std::is_integral::value, bool>::type + try_override(OT & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) { + target = ovrd->val_i64; + return true; + } + return false; + } + + template + static typename std::enable_if::value, bool>::type + try_override(T & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) { + target = ovrd->val_f64; + return true; + } + return false; + } + + template + static typename std::enable_if::value, bool>::type + try_override(T & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) { + target = ovrd->val_str; + return true; + } + return false; + } + + static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) { + if (try_override(target, ovrd)) { + return true; + } + if (k < 0) { return false; } + target = get_kv(ctx, k); + return true; + } + + static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) { + return set(ctx, gguf_find_key(ctx, key), target, ovrd); + } + + static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) { + return set(ctx, key.c_str(), target, ovrd); + } + }; +} + + template + typename std::enable_if::value, bool>::type + llama_model_loader::get_arr_n(const std::string & key, T & result, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0) { + if (required) { + throw std::runtime_error(format("key not found in model: %s", key.c_str())); + } + return false; + } + + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + + result = arr_info.length; + return true; + } + + template + typename std::enable_if::value, bool>::type + llama_model_loader::get_arr_n(enum llm_kv kid, T & result, bool required) { + return get_arr_n(llm_kv(kid), result, required); + } + + template bool llama_model_loader::get_arr_n(enum llm_kv kid, uint32_t & result, bool required); + + template + bool llama_model_loader::get_arr(const std::string & key, std::vector & result, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) { + if (required) { + throw std::runtime_error(format("array key not found in model: %s", key.c_str())); + } + return false; + } + + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + switch (arr_info.gt) { + case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same::value)); break; + case GGUF_TYPE_INT32: GGML_ASSERT( + (std::is_same::value) || + (std::is_same::value)); break; 
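+            // arrays of any other element type are rejected below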
+ default: + throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str())); + } + + result.resize(arr_info.length); + result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length); + + return true; + } + + template + bool llama_model_loader::get_arr(const std::string & key, std::array & result, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) { + if (required) { + throw std::runtime_error(format("array key not found in model: %s", key.c_str())); + } + return false; + } + + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + switch (arr_info.gt) { + case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same::value)); break; + case GGUF_TYPE_INT32: GGML_ASSERT( + (std::is_same::value) || + (std::is_same::value)); break; + default: + throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str())); + } + + if (arr_info.length > N_MAX) { + throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX)); + } + + std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin()); + + return true; + } + + template + bool llama_model_loader::get_arr(enum llm_kv kid, T & result, bool required) { + return get_arr(llm_kv(kid), result, required); + } + + template + bool llama_model_loader::get_key(const std::string & key, T & result, bool required) { + auto it = kv_overrides.find(key); + + const struct llama_model_kv_override * override = + it != kv_overrides.end() ? &it->second : nullptr; + + const bool found = GGUFMeta::GKV::set(meta.get(), key, result, override); + + if (required && !found) { + throw std::runtime_error(format("key not found in model: %s", key.c_str())); + } + + return found; + } + + template + bool llama_model_loader::get_key(enum llm_kv kid, T & result, bool required) { + return get_key(llm_kv(kid), result, required); + } + + template bool llama_model_loader::get_key (enum llm_kv kid, bool & result, bool required); + template bool llama_model_loader::get_key (enum llm_kv kid, float & result, bool required); + template bool llama_model_loader::get_key (enum llm_kv kid, uint32_t & result, bool required); + template bool llama_model_loader::get_key(enum llm_kv kid, std::string & result, bool required); + + template<> + bool llama_model_loader::get_key(enum llm_kv kid, enum llama_pooling_type & result, bool required) { + uint32_t tmp; + const bool found = get_key(kid, tmp, required); + if (found) { + result = (enum llama_pooling_type) tmp; + } else { + result = LLAMA_POOLING_TYPE_UNSPECIFIED; + } + return found; + } + + // get array of n <= N_MAX elements, or a single element repeated n times + template + bool llama_model_loader::get_key_or_arr(const std::string & key, std::array & result, uint32_t n, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0) { + if (required) { + throw std::runtime_error(format("key not found in model: %s", key.c_str())); + } + return false; + } + + if (n > N_MAX) { + throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str())); + } + + if (gguf_get_kv_type(meta.get(), kid) == GGUF_TYPE_ARRAY) { + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + if (n != arr_info.length) { + throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", 
key.c_str(), n, (uint32_t) arr_info.length)); + } + + return get_arr(key, result, required); + } + + T value; + + bool ok = get_key(key, value, required); + if (!ok) { + return false; + } + + for (uint32_t i = 0; i < n; i++) { + result[i] = value; + } + + return true; + } + + template + bool llama_model_loader::get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required) { + return get_key_or_arr(llm_kv(kid), result, n, required); + } + + // TODO: this is not very clever - figure out something better + template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required); + template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required); + +llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) { + int trace = 0; + if (getenv("LLAMA_TRACE")) { + trace = atoi(getenv("LLAMA_TRACE")); + } + + if (param_overrides_p != nullptr) { + for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) { + kv_overrides.insert({std::string(p->key), *p}); + } + } + + struct ggml_context * ctx = NULL; + struct gguf_init_params params = { + /*.no_alloc = */ true, + /*.ctx = */ &ctx, + }; + + meta.reset(gguf_init_from_file(fname.c_str(), params)); + if (!meta) { + throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str())); + } + + get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false); + llm_kv = LLM_KV(llm_arch_from_string(arch_name)); + + files.emplace_back(new llama_file(fname.c_str(), "rb")); + contexts.emplace_back(ctx); + + // Save tensors data offset of the main file. + // For subsidiary files, `meta` tensor data offset must not be used, + // so we build a unified tensors index for weights. 
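+    // walk the tensors of the main GGUF: record each name together with its file
+    // index and byte offset in weights_map, and reject duplicate names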
+    for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
+        std::string tensor_name = std::string(cur->name);
+        // make sure there are no duplicated tensor names
+        if (weights_map.find(tensor_name) != weights_map.end()) {
+            throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur)));
+        }
+        n_elements += ggml_nelements(cur);
+        n_bytes += ggml_nbytes(cur);
+        weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur));
+    }
+    uint16_t n_split = 0;
+    get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
+
+    // Load additional GGML contexts
+    if (n_split > 1) {
+        uint16_t idx = 0;
+        get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
+        if (idx != 0) {
+            throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
+        }
+
+        std::vector<char> split_prefix(llama_path_max(), 0);
+        if (!llama_split_prefix(split_prefix.data(), split_prefix.size(), fname.c_str(), idx, n_split)) {
+            throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
+        }
+
+        if (trace > 0) {
+            LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
+        }
+
+        std::vector<char> split_path(llama_path_max(), 0);
+        for (idx = 1; idx < n_split; idx++) {
+            llama_split_path(split_path.data(), split_path.size(), split_prefix.data(), idx, n_split);
+
+            struct gguf_init_params split_params = {
+                /*.no_alloc = */ true,
+                /*.ctx = */ &ctx,
+            };
+            gguf_context_ptr ctx_gguf { gguf_init_from_file(split_path.data(), split_params) };
+            if (!ctx_gguf) {
+                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path.data()));
+            }
+
+            files.emplace_back(new llama_file(split_path.data(), "rb"));
+            contexts.emplace_back(ctx);
+
+            // Save tensors data offset info of the shard.
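+            // same bookkeeping as for the main file, except the offsets come from the
+            // split's own GGUF context and the file index is the split index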
+ for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { + std::string tensor_name = std::string(cur->name); + // make sure there is no duplicated tensor names + if (weights_map.find(tensor_name) != weights_map.end()) { + throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur))); + } + n_elements += ggml_nelements(cur); + n_bytes += ggml_nbytes(cur); + weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur)); + } + } + + get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors); + + // sanity check + { + const int n_tensors_loaded = (int) weights_map.size(); + if (n_tensors != n_tensors_loaded) { + throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded)); + } + } + + LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1); + } + + n_kv = gguf_get_n_kv(meta.get()); + n_tensors = weights_map.size(); + + fver = (enum llama_fver) gguf_get_version(meta.get()); + + LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n", + __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver)); + + // determine file type based on the number of tensors for each quantization and print meta data + // TODO: make optional + { + std::map n_type; + + uint32_t n_type_max = 0; + enum ggml_type type_max = GGML_TYPE_F32; + + for (const auto & it : weights_map) { + const llama_tensor_weight & w = it.second; + const ggml_tensor * tensor = w.tensor; + + enum ggml_type type = tensor->type; + + n_type[type]++; + + if (n_type_max < n_type[type]) { + n_type_max = n_type[type]; + type_max = type; + } + + if (trace > 0) { + const uint16_t sid = w.idx; + LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str()); + } + } + + switch (type_max) { + case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break; + case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break; + case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break; + case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break; + case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break; + case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break; + case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break; + case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break; + case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break; + case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break; + case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break; + case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break; + case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break; + case GGML_TYPE_TQ1_0: ftype = LLAMA_FTYPE_MOSTLY_TQ1_0; break; + case GGML_TYPE_TQ2_0: ftype = LLAMA_FTYPE_MOSTLY_TQ2_0; break; + case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break; + case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break; + case GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break; + case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break; + case GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break; + case GGML_TYPE_IQ1_M: ftype = LLAMA_FTYPE_MOSTLY_IQ1_M; break; + case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break; + case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break; + case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break; + 
default: + { + LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max)); + ftype = LLAMA_FTYPE_ALL_F32; + } break; + } + + // this is a way to mark that we have "guessed" the file type + ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED); + + { + const int kid = gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV + if (kid >= 0) { + ftype = (llama_ftype) gguf_get_val_u32(meta.get(), kid); + } + } + + LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__); + + for (int i = 0; i < n_kv; i++) { + const char * name = gguf_get_key(meta.get(), i); + const enum gguf_type type = gguf_get_kv_type(meta.get(), i); + const std::string type_name = + type == GGUF_TYPE_ARRAY + ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i)) + : gguf_type_name(type); + + std::string value = gguf_kv_to_str(meta.get(), i); + const size_t MAX_VALUE_LEN = 40; + if (value.size() > MAX_VALUE_LEN) { + value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()); + } + replace_all(value, "\n", "\\n"); + + LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str()); + } + + // print type counts + for (auto & kv : n_type) { + if (kv.second == 0) { + continue; + } + + LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second); + } + } + + if (!llama_mmap::SUPPORTED) { + LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__); + use_mmap = false; + } + + this->use_mmap = use_mmap; + this->check_tensors = check_tensors; +} + +std::string llama_model_loader::get_arch_name() const { + return arch_name; +} + +enum llm_arch llama_model_loader::get_arch() const { + return llm_kv.arch; +} + +const llama_model_loader::llama_tensor_weight * llama_model_loader::get_weight(const char * name) const { + auto pos = weights_map.find(name); + if (pos != weights_map.end()) { + return &pos->second; + } + + return nullptr; +} + +const llama_model_loader::llama_tensor_weight & llama_model_loader::require_weight(const char * name) const { + const llama_tensor_weight * weight = get_weight(name); + if (!weight) { + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name)); + } + return *weight; +} + +struct ggml_tensor * llama_model_loader::get_tensor_meta(const char * name) const { + const auto * weight = get_weight(name); + if (!weight) { + return nullptr; + } + return weight->tensor; +} + +struct ggml_tensor * llama_model_loader::require_tensor_meta(const std::string & name) const { + struct ggml_tensor * tensor = get_tensor_meta(name.c_str()); + if (!tensor) { + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); + } + return tensor; +} + +const struct ggml_tensor * llama_model_loader::check_tensor_dims(const std::string & name, const std::vector & ne, bool required) const { + const struct ggml_tensor * cur = get_tensor_meta(name.c_str()); + + if (cur == NULL) { + if (!required) { + return NULL; + } + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); + } + + { + bool is_ok = true; + for (size_t i = 0; i < GGML_MAX_DIMS; ++i) { + if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) { + is_ok = false; + break; + } + } + if (!is_ok) { + throw std::runtime_error( + format("%s: tensor '%s' has wrong shape; expected %s, got %s", + __func__, name.c_str(), + 
llama_format_tensor_shape(ne).c_str(), + llama_format_tensor_shape(cur).c_str())); + } + } + + return cur; +} + +struct ggml_tensor * llama_model_loader::create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list & ne, int flags) { + const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED)); + + if (cur == NULL) { + return NULL; + } + + bool duplicated = flags & TENSOR_DUPLICATED; + + struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur); + ggml_set_name(tensor, ggml_get_name(cur)); + + if (duplicated) { + size_data += ggml_nbytes(cur); + } else { + n_created++; + } + + return tensor; + +} + +struct ggml_tensor * llama_model_loader::create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list & ne, size_t offset, bool required) { + const struct ggml_tensor * cur = check_tensor_dims(name, ne, required); + + if (cur == NULL) { + return NULL; + } + + if (cur->type != base->type) { + throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type))); + } + + std::array dims; + for (size_t i = 0; i < GGML_MAX_DIMS; ++i) { + dims[i] = i < ne.size() ? ne.begin()[i] : 1; + } + + struct ggml_tensor * tensor = ggml_view_4d(ctx, base, + dims[0], dims[1], dims[2], dims[3], + cur->nb[1], cur->nb[2], cur->nb[3], + offset); + + ggml_set_name(tensor, name.c_str()); + + n_created++; + + return tensor; +} + +void llama_model_loader::done_getting_tensors() const { + if (n_created != n_tensors) { + throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created)); + } +} + +void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps) { + if (use_mmap) { + mappings.reserve(files.size()); + mmaps_used.reserve(files.size()); + for (const auto & file : files) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU)); + auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa"); + std::unique_ptr mapping(new llama_mmap(file.get(), prefetch ? 
-1 : 0, is_numa_fn())); + mmaps_used.emplace_back(mapping->size(), 0); + if (mlock_mmaps) { + std::unique_ptr mlock_mmap(new llama_mlock()); + mlock_mmap->init(mapping->addr()); + mlock_mmaps->emplace_back(std::move(mlock_mmap)); + } + mappings.emplace_back(std::move(mapping)); + } + } + + // compute the total size of all tensors for progress reporting + for (const auto & it : weights_map) { + size_data += ggml_nbytes(it.second.tensor); + } +} + +void llama_model_loader::get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const { + GGML_ASSERT(!mappings.empty()); + const auto & mapping = mappings.at(idx); + + *first = mapping->size(); + *last = 0; + *addr = mapping->addr(); + for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) { + const auto * weight = get_weight(ggml_get_name(tensor)); + if (!weight || weight->idx != idx) { + continue; + } + *first = std::min(*first, weight->offs); + *last = std::max(*last, weight->offs + ggml_nbytes(tensor)); + } +} + +void llama_model_loader::load_data_for(struct ggml_tensor * cur) const { + const auto & w = require_weight(ggml_get_name(cur)); + + if (use_mmap) { + const auto & mapping = mappings.at(w.idx); + if (cur->data == nullptr) { + cur->data = (uint8_t *)mapping->addr() + w.offs; + } else { + memcpy(cur->data, (uint8_t *)mapping->addr() + w.offs, ggml_nbytes(cur)); + } + } else { + GGML_ASSERT(cur->data != nullptr); + GGML_ASSERT(w.idx < files.size()); + const auto & file = files.at(w.idx); + file->seek(w.offs, SEEK_SET); + file->read_raw(cur->data, ggml_nbytes(cur)); + } + + if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) { + throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); + } +} + +bool llama_model_loader::load_all_data( + struct ggml_context * ctx, + llama_buf_map & bufs, + llama_mlocks * lmlocks, + llama_progress_callback progress_callback, + void * progress_callback_user_data) { + GGML_ASSERT(size_data != 0 && "call init_mappings() first"); + + std::vector> read_buf; + std::vector>> validation_result; + + // 4 staging buffers for async uploads, each sized 1MB seems to be a good default for single NVMe drives. + // NVMe raid configurations might require more / larger buffers. + constexpr size_t n_buffers = 4; + constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB + + std::vector host_buffers; + std::vector events; + std::vector host_ptrs; + size_t buffer_idx = 0; // buffer to use for async loads + ggml_backend_t upload_backend = [&](const char * func) -> ggml_backend_t { + if (use_mmap || check_tensors) { + return nullptr; + } + // When not using mmaped io use async uploads from pinned memory to GPU memory. + // First determine if the backend supports the necessary features for async uploads. + auto * buf = bufs.count(0) ? 
bufs.at(0) : nullptr; + if (!buf) { + LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func); + return nullptr; + } + + auto * buft = ggml_backend_buffer_get_type(buf); + auto * dev = ggml_backend_buft_get_device(buft); + if (!dev) { + LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func, + ggml_backend_buft_name(buft)); + return nullptr; + } + + if (buft != ggml_backend_dev_buffer_type(dev)) { + LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func, + ggml_backend_buft_name(buft), ggml_backend_dev_name(dev)); + return nullptr; + } + + ggml_backend_dev_props props; + ggml_backend_dev_get_props(dev, &props); + if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) { + LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + auto * host_buft = ggml_backend_dev_host_buffer_type(dev); + if (!host_buft) { + LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + // If the backend is supported, create pinned memory buffers and events for synchronisation. + for (size_t idx = 0; idx < n_buffers; ++idx) { + auto * buf = ggml_backend_buft_alloc_buffer(host_buft, buffer_size); + if (!buf) { + LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + host_buffers.emplace_back(buf); + host_ptrs.emplace_back(ggml_backend_buffer_get_base(buf)); + + auto * event = ggml_backend_event_new(dev); + if (!event) { + LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + events.emplace_back(event); + } + + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + if (!backend) { + LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + return backend; + }(__func__); + + if (upload_backend) { + LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__, + ggml_backend_dev_name(ggml_backend_get_device(upload_backend)), + ggml_backend_buft_name(ggml_backend_buffer_get_type(bufs.at(0))), + ggml_backend_name(upload_backend)); + } + + for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { + const auto * weight = get_weight(ggml_get_name(cur)); + if (weight == nullptr) { + // this can happen with split experts models + continue; + } + + if (progress_callback) { + if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) { + return false; + } + } + + size_t n_size = ggml_nbytes(cur); + + if (use_mmap) { + const auto & mapping = mappings.at(weight->idx); + ggml_backend_buffer_t buf_mmap = nullptr; + if (bufs.count(weight->idx)) { + buf_mmap = bufs.at(weight->idx); + } + uint8_t * data = (uint8_t *) mapping->addr() + weight->offs; + + if (check_tensors) { + validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] { + return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size)); + })); + } + + GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated + if (buf_mmap && cur->data == nullptr) { + ggml_backend_tensor_alloc(buf_mmap, cur, data); + if (lmlocks) { + const 
auto & lmlock = lmlocks->at(weight->idx); + lmlock->grow_to(weight->offs + n_size); + } + + auto & mmap_used = mmaps_used[weight->idx]; + mmap_used.first = std::min(mmap_used.first, weight->offs); + mmap_used.second = std::max(mmap_used.second, weight->offs + n_size); + } else { + ggml_backend_tensor_set(cur, data, 0, n_size); + } + } else { + const auto & file = files.at(weight->idx); + if (ggml_backend_buffer_is_host(cur->buffer)) { + file->seek(weight->offs, SEEK_SET); + file->read_raw(cur->data, n_size); + if (check_tensors) { + validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] { + return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size)); + })); + } + } else { + // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU. + if (upload_backend) { + file->seek(weight->offs, SEEK_SET); + + size_t bytes_read = 0; + + while (bytes_read < n_size) { + size_t read_iteration = std::min(buffer_size, n_size - bytes_read); + + ggml_backend_event_synchronize(events[buffer_idx]); + file->read_raw(host_ptrs[buffer_idx], read_iteration); + ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration); + ggml_backend_event_record(events[buffer_idx], upload_backend); + + bytes_read += read_iteration; + ++buffer_idx; + buffer_idx %= n_buffers; + } + } else { + read_buf.resize(n_size); + file->seek(weight->offs, SEEK_SET); + file->read_raw(read_buf.data(), n_size); + ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size); + if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) { + throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); + } + } + } + } + + size_done += n_size; + } + + // free temporary resources used for async uploads + for (auto * event : events) { + ggml_backend_event_synchronize(event); + ggml_backend_event_free(event); + } + for (auto * buf : host_buffers) { + ggml_backend_buffer_free(buf); + } + ggml_backend_free(upload_backend); + + // check validation results + bool validation_failed = false; + for (auto & future : validation_result) { + auto result = future.get(); + if (!result.second) { + LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first)); + validation_failed = true; + } + } + if (validation_failed) { + throw std::runtime_error("found tensors with invalid data"); + } + + // check if this is the last call and do final cleanup + if (size_done >= size_data) { + // unmap offloaded tensors and metadata + if (use_mmap) { + for (uint32_t idx = 0; idx < mappings.size(); idx++) { + const auto & mmap_used = mmaps_used.at(idx); + auto & mapping = mappings.at(idx); + mapping->unmap_fragment(0, mmap_used.first); + if (mmap_used.second != 0) { + mapping->unmap_fragment(mmap_used.second, mapping->size()); + } + } + } + if (progress_callback) { + // Even though the model is done loading, we still honor + // cancellation since we need to free allocations. 
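+        // (illustrative sketch, not part of the patch) The chunked upload used earlier in this
+        // function can be read as a small standalone helper: the pinned host buffers are cycled
+        // round-robin, and before a buffer is reused we wait on its event, refill it from disk,
+        // queue an async host-to-device copy into the destination tensor, then re-record the event.
+        // Only calls already used by this loader are assumed here (ggml_backend_event_synchronize,
+        // ggml_backend_event_record, ggml_backend_tensor_set_async, llama_file seek/read_raw).
+        //
+        // static void stream_tensor_async(ggml_backend_t backend, llama_file * file, size_t offs,
+        //                                 ggml_tensor * tensor, size_t n_size,
+        //                                 std::vector<void *> & host_ptrs,
+        //                                 std::vector<ggml_backend_event_t> & events,
+        //                                 size_t buf_size) {
+        //     const size_t n_bufs = host_ptrs.size();
+        //     size_t done = 0;
+        //     size_t idx  = 0;
+        //     file->seek(offs, SEEK_SET);
+        //     while (done < n_size) {
+        //         const size_t chunk = std::min(buf_size, n_size - done);
+        //         ggml_backend_event_synchronize(events[idx]);   // wait until this staging buffer is free again
+        //         file->read_raw(host_ptrs[idx], chunk);         // refill it with the next file chunk
+        //         ggml_backend_tensor_set_async(backend, tensor, host_ptrs[idx], done, chunk); // async upload
+        //         ggml_backend_event_record(events[idx], backend); // signal when the copy has consumed the buffer
+        //         done += chunk;
+        //         idx   = (idx + 1) % n_bufs;
+        //     }
+        // }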
+ return progress_callback(1.0f, progress_callback_user_data); + } + } + + return true; +} diff --git a/src/llama-model-loader.h b/src/llama-model-loader.h new file mode 100644 index 000000000..1ec478195 --- /dev/null +++ b/src/llama-model-loader.h @@ -0,0 +1,158 @@ +#pragma once + +#include "llama.h" + +#include "llama-impl.h" +#include "llama-arch.h" +#include "llama-mmap.h" + +#include "ggml-cpp.h" + +#include +#include +#include +#include + +using llama_buf_map = std::unordered_map; + +enum llama_fver { + GGUF_FILE_VERSION_V1 = 1, + GGUF_FILE_VERSION_V2 = 2, + GGUF_FILE_VERSION_V3 = 3, +}; + +const char * llama_file_version_name(llama_fver version); + +struct llama_model_loader { + // Holds information on a model weight + struct llama_tensor_weight { + uint16_t idx; // source file index + size_t offs; // tensor data offset in the original file + + ggml_tensor * tensor; + + llama_tensor_weight(const llama_file * file, uint16_t idx, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) { + const int tensor_idx = gguf_find_tensor(gguf_ctx, ggml_get_name(tensor)); + if (tensor_idx < 0) { + throw std::runtime_error(format("tensor '%s' not found in the model", ggml_get_name(tensor))); + } + + offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx); + if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size()) { + throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", ggml_get_name(tensor))); + } + } + }; + + // custom comparator to sort weights more nicely by layer + struct weight_name_comparer { + bool operator()(const std::string & a, const std::string & b) const { + int a_layer = -1; + int b_layer = -1; + sscanf(a.c_str(), "blk.%d.", &a_layer); + sscanf(b.c_str(), "blk.%d.", &b_layer); + if (a_layer != b_layer) { + return a_layer < b_layer; + } + return a < b; + } + }; + + static const int TENSOR_NOT_REQUIRED = 1; + static const int TENSOR_DUPLICATED = 2; + + int n_kv = 0; + int n_tensors = 0; + int n_created = 0; + + uint64_t n_elements = 0; + size_t n_bytes = 0; + + bool use_mmap = false; + bool check_tensors; + + llama_files files; + llama_ftype ftype; + llama_fver fver; + + llama_mmaps mappings; + + std::map weights_map; + std::unordered_map kv_overrides; + + gguf_context_ptr meta; + std::vector contexts; + + std::string arch_name; + LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN); + + size_t size_done = 0; + size_t size_data = 0; + std::vector> mmaps_used; + + llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p); + + template + typename std::enable_if::value, bool>::type + get_arr_n(const std::string & key, T & result, bool required = true); + + template + typename std::enable_if::value, bool>::type + get_arr_n(enum llm_kv kid, T & result, bool required = true); + + template + bool get_arr(const std::string & key, std::vector & result, bool required = true); + + template + bool get_arr(const std::string & key, std::array & result, bool required = true); + + template + bool get_arr(enum llm_kv kid, T & result, bool required = true); + + template + bool get_key(const std::string & key, T & result, bool required = true); + + template + bool get_key(enum llm_kv kid, T & result, bool required = true); + + template + bool get_key_or_arr(const std::string & key, std::array & result, uint32_t n, bool required = true); + + template + bool get_key_or_arr(enum llm_kv kid, 
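+    // (standalone illustration, not part of the patch) How the weight_name_comparer defined
+    // earlier in this header orders tensor names: the "blk.%d." layer index is compared
+    // numerically first, so "blk.2.*" precedes "blk.10.*" (plain std::string comparison would
+    // reverse them), and names without a "blk.N." prefix parse as layer -1 and sort first.
+    //
+    // #include <cstdio>
+    // #include <map>
+    // #include <string>
+    //
+    // struct weight_name_comparer_demo {
+    //     bool operator()(const std::string & a, const std::string & b) const {
+    //         int a_layer = -1, b_layer = -1;
+    //         sscanf(a.c_str(), "blk.%d.", &a_layer);
+    //         sscanf(b.c_str(), "blk.%d.", &b_layer);
+    //         if (a_layer != b_layer) {
+    //             return a_layer < b_layer;
+    //         }
+    //         return a < b;
+    //     }
+    // };
+    //
+    // int main() {
+    //     std::map<std::string, int, weight_name_comparer_demo> m;
+    //     m["blk.10.attn_q.weight"] = 0;
+    //     m["blk.2.ffn_up.weight"]  = 0;
+    //     m["output_norm.weight"]   = 0;
+    //     for (const auto & kv : m) {
+    //         printf("%s\n", kv.first.c_str()); // output_norm.weight, blk.2.ffn_up.weight, blk.10.attn_q.weight
+    //     }
+    //     return 0;
+    // }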
T & result, uint32_t n, bool required = true); + + std::string get_arch_name() const; + + enum llm_arch get_arch() const; + + const llama_tensor_weight * get_weight(const char * name) const; + + const llama_tensor_weight & require_weight(const char * name) const; + + struct ggml_tensor * get_tensor_meta(const char * name) const; + + struct ggml_tensor * require_tensor_meta(const std::string & name) const; + + const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector & ne, bool required) const; + + struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list & ne, int flags = 0); + + struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list & ne, size_t offset, bool required = true); + + void done_getting_tensors() const; + + void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr); + + void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const; + + // for backwards compatibility, does not support ggml-backend + void load_data_for(struct ggml_tensor * cur) const; + + // Returns false if cancelled by progress_callback + bool load_all_data( + struct ggml_context * ctx, + llama_buf_map & bufs, + llama_mlocks * lmlocks, + llama_progress_callback progress_callback, + void * progress_callback_user_data); +}; diff --git a/src/llama-model.cpp b/src/llama-model.cpp new file mode 100644 index 000000000..ace0ba262 --- /dev/null +++ b/src/llama-model.cpp @@ -0,0 +1,2164 @@ +#include "llama-model.h" + +#include "llama-impl.h" +#include "llama-model-loader.h" + +#include "unicode.h" // TODO: remove + +#include +#include +#include +#include +#include + +static const size_t kiB = 1024; +static const size_t MiB = 1024*kiB; +static const size_t GiB = 1024*MiB; + +const char * llm_type_name(llm_type type) { + switch (type) { + case MODEL_14M: return "14M"; + case MODEL_17M: return "17M"; + case MODEL_22M: return "22M"; + case MODEL_33M: return "33M"; + case MODEL_60M: return "60M"; + case MODEL_70M: return "70M"; + case MODEL_80M: return "80M"; + case MODEL_109M: return "109M"; + case MODEL_137M: return "137M"; + case MODEL_160M: return "160M"; + case MODEL_220M: return "220M"; + case MODEL_250M: return "250M"; + case MODEL_270M: return "270M"; + case MODEL_335M: return "335M"; + case MODEL_410M: return "410M"; + case MODEL_450M: return "450M"; + case MODEL_770M: return "770M"; + case MODEL_780M: return "780M"; + case MODEL_0_5B: return "0.5B"; + case MODEL_1B: return "1B"; + case MODEL_1_3B: return "1.3B"; + case MODEL_1_4B: return "1.4B"; + case MODEL_1_5B: return "1.5B"; + case MODEL_1_6B: return "1.6B"; + case MODEL_2B: return "2B"; + case MODEL_2_8B: return "2.8B"; + case MODEL_3B: return "3B"; + case MODEL_4B: return "4B"; + case MODEL_6B: return "6B"; + case MODEL_6_9B: return "6.9B"; + case MODEL_7B: return "7B"; + case MODEL_8B: return "8B"; + case MODEL_9B: return "9B"; + case MODEL_11B: return "11B"; + case MODEL_12B: return "12B"; + case MODEL_13B: return "13B"; + case MODEL_14B: return "14B"; + case MODEL_15B: return "15B"; + case MODEL_16B: return "16B"; + case MODEL_20B: return "20B"; + case MODEL_30B: return "30B"; + case MODEL_32B: return "32B"; + case MODEL_34B: return "34B"; + case MODEL_35B: return "35B"; + case MODEL_40B: return "40B"; + case MODEL_65B: return "65B"; + case MODEL_70B: return "70B"; + case MODEL_236B: return "236B"; + case 
MODEL_314B: return "314B"; + case MODEL_SMALL: return "0.1B"; + case MODEL_MEDIUM: return "0.4B"; + case MODEL_LARGE: return "0.8B"; + case MODEL_XL: return "1.5B"; + case MODEL_A1_7B: return "A1.7B"; + case MODEL_A2_7B: return "A2.7B"; + case MODEL_8x7B: return "8x7B"; + case MODEL_8x22B: return "8x22B"; + case MODEL_16x12B: return "16x12B"; + case MODEL_10B_128x3_66B: return "10B+128x3.66B"; + case MODEL_57B_A14B: return "57B.A14B"; + case MODEL_27B: return "27B"; + default: return "?B"; + } +} + +static std::string llama_model_ftype_name(llama_ftype ftype) { + if (ftype & LLAMA_FTYPE_GUESSED) { + return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)"; + } + + switch (ftype) { + case LLAMA_FTYPE_ALL_F32: return "all F32"; + case LLAMA_FTYPE_MOSTLY_F16: return "F16"; + case LLAMA_FTYPE_MOSTLY_BF16: return "BF16"; + case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; + case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; + case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0"; + case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; + case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; + case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large"; + case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; + case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary"; + case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary"; + case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw"; + + default: return "unknown, may not work"; + } +} + +std::string llama_model_arch_name (const llama_model & model) { + return llm_arch_name(model.arch); +} + +std::string llama_model_type_name (const llama_model & model) { + return llm_type_name(model.type); +} + +std::string llama_model_ftype_name(const llama_model & model) { + return llama_model_ftype_name(model.ftype); +} + +template +static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) { + ggml_init_params params = { + /*.mem_size =*/ ggml_tensor_overhead()*8, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + ggml_context_ptr ctx { ggml_init(params) }; + if (!ctx) { + throw std::runtime_error(format("failed to create ggml context")); + } + + ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) }; + ggml_tensor * op_tensor = fn(ctx.get()); + for (int i = 0; i < GGML_MAX_SRC; i++) { + if (op_tensor->src[i] != nullptr) { + assert(op_tensor->src[i]->buffer == nullptr); + 
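+            // note: the zero-size buffer allocated above never holds data (the context is no_alloc);
+            // it only gives the source tensors a valid buffer handle so that ggml_backend_dev_supports_op
+            // below can see which buffer type the operands would live in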
op_tensor->src[i]->buffer = buf.get(); + } + } + + bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor); + + return op_supported; +} + +template +static ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) { + for (const auto & cur : buft_list) { + ggml_backend_dev_t cur_dev = cur.first; + ggml_backend_buffer_type_t cur_buft = cur.second; + if (buft_supported(cur_buft, cur_dev, fn)) { + return cur_buft; + } + } + + throw std::runtime_error(format("no suitable buffer type found")); +} + +ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il) { + return select_buft( + *model.dev_layer.at(il).buft_list, + [&](ggml_context * ctx) { + ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd); + ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd); + return ggml_add(ctx, cur, layer_dir); + }); +} + +struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name) { + auto it = std::find_if(model.tensors_by_name.begin(), model.tensors_by_name.end(), + [name](const std::pair & it) { + return it.first == name; + }); + if (it == model.tensors_by_name.end()) { + return nullptr; + } + + return it->second; +} + +size_t llama_model_max_nodes(const llama_model & model) { + return std::max(8192, model.tensors_by_name.size()*5); +} + +static const std::map LLAMA_ROPE_SCALING_TYPES = { + { LLAMA_ROPE_SCALING_TYPE_NONE, "none" }, + { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" }, + { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" }, + { LLAMA_ROPE_SCALING_TYPE_LONGROPE, "longrope" }, +}; + +static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) { + for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) { + if (kv.second == name) { + return (llama_rope_scaling_type) kv.first; + } + } + + return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED; +} + +// NOTE: avoid ever using this except for building the token_to_piece caches +static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) { + std::string piece; + piece.resize(piece.capacity()); // using string internal cache + const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special); + if (n_chars < 0) { + piece.resize(-n_chars); + int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special); + GGML_ASSERT(check == -n_chars); + } + else { + piece.resize(n_chars); + } + + return piece; +} + +void llm_load_stats(llama_model_loader & ml, llama_model & model) { + model.n_elements = ml.n_elements; + model.n_bytes = ml.n_bytes; +} + +void llm_load_arch(llama_model_loader & ml, llama_model & model) { + model.arch = ml.get_arch(); + if (model.arch == LLM_ARCH_UNKNOWN) { + throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'"); + } +} + +void llm_load_hparams(llama_model_loader & ml, llama_model & model) { + auto & hparams = model.hparams; + const gguf_context * ctx = ml.meta.get(); + + // get metadata as string + for (int i = 0; i < gguf_get_n_kv(ctx); i++) { + enum gguf_type type = gguf_get_kv_type(ctx, i); + if (type == GGUF_TYPE_ARRAY) { + continue; + } + const char * name = gguf_get_key(ctx, i); + const std::string value = gguf_kv_to_str(ctx, i); + model.gguf_kv.emplace(name, value); + } + + // get general kv + ml.get_key(LLM_KV_GENERAL_NAME, model.name, false); + + // get hparams kv + ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || 
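+                       // short-circuit fallback: if the vocab_size KV is missing,
+                       // n_vocab is taken from the length of the tokenizer token list instead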
ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false); + + // everything past this point is not vocab-related + if (hparams.vocab_only) { + return; + } + + ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train); + ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd); + ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer); + ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false); + ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false); + + if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) { + ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features); + + ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd); + ml.get_key(LLM_KV_POSNET_BLOCK_COUNT, hparams.posnet.n_layer); + + ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd); + ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT, hparams.convnext.n_layer); + } + + GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS); + GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert); + if (hparams.n_expert > 0) { + GGML_ASSERT(hparams.n_expert_used > 0); + } else { + GGML_ASSERT(hparams.n_expert_used == 0); + } + + // zero-out the array hparams + std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0); + std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0); + std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0); + + ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false); + ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false); + + // n_head_kv is optional, default to n_head + hparams.n_head_kv_arr = hparams.n_head_arr; + + ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false); + + bool rope_finetuned = false; + ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false); + hparams.rope_finetuned = rope_finetuned; + + hparams.n_ctx_orig_yarn = hparams.n_ctx_train; + ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false); + + // rope_freq_base (optional) + hparams.rope_freq_base_train = 10000.0f; + ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false); + + std::string rope_scaling("linear"); + ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false); + hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling); + GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED); + + // rope_freq_scale (inverse of the kv) is optional + float ropescale = 0.0f; + if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) { + // try the old key name + ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false); + } + hparams.rope_freq_scale_train = ropescale == 0.0f ? 
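+    // the KV stores the linear scaling factor and the training frequency scale is its inverse:
+    // e.g. a factor of 4.0 (4x context extension) yields 0.25, a missing/zero factor falls back to 1.0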
1.0f : 1.0f/ropescale; + + ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false); + + // non-transformer models do not have attention heads + if (hparams.n_head() > 0) { + // gpt-neox n_rot = rotary_pct * (n_embd / n_head) + // gpt-j n_rot = rotary_dim + + hparams.n_embd_head_k = hparams.n_embd / hparams.n_head(); + ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false); + + hparams.n_embd_head_v = hparams.n_embd / hparams.n_head(); + ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false); + + // sanity check for n_rot (optional) + hparams.n_rot = hparams.n_embd_head_k; + + ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false); + + if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) { + if (hparams.n_rot != hparams.n_embd_head_k) { + throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k)); + } + } + } else { + hparams.n_rot = 0; + hparams.n_embd_head_k = 0; + hparams.n_embd_head_v = 0; + } + + using e_model = llm_type; // TMP + + // arch-specific KVs + switch (model.arch) { + case LLM_ARCH_LLAMA: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + if (hparams.n_expert == 8) { + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_8x7B; break; + case 56: model.type = e_model::MODEL_8x22B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } else { + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B + case 22: model.type = e_model::MODEL_1B; break; + case 26: model.type = e_model::MODEL_3B; break; + case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B + // granite uses a vocab with len 49152 + case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break; + case 36: model.type = e_model::MODEL_8B; break; // granite + case 40: model.type = e_model::MODEL_13B; break; + case 48: model.type = e_model::MODEL_34B; break; + case 60: model.type = e_model::MODEL_30B; break; + case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? 
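+                        // both are 80-layer models; LLaMA 65B uses full multi-head attention
+                        // (n_head == n_head_kv) while Llama 2 70B uses grouped-query attention
+                        // with fewer KV heads, which is what tells them apart here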
e_model::MODEL_65B : e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } + } break; + case LLM_ARCH_DECI: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_MINICPM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + + switch (hparams.n_layer) { + case 52: model.type = e_model::MODEL_1B; break; + case 40: model.type = e_model::MODEL_2B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_MINICPM3: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); + ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); + + switch (hparams.n_layer) { + case 62: model.type = e_model::MODEL_4B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GROK: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 64: model.type = e_model::MODEL_314B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_FALCON: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 60: model.type = e_model::MODEL_40B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_BAICHUAN: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + if (model.type == e_model::MODEL_13B) { + // TODO: become GGUF KV parameter + hparams.f_max_alibi_bias = 8.0f; + } + } break; + case LLM_ARCH_STARCODER: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 36: model.type = e_model::MODEL_3B; break; + case 42: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_15B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_REFACT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_1B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + // TODO: become GGUF KV parameter + hparams.f_max_alibi_bias = 8.0f; + } break; + case LLM_ARCH_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); + + switch (hparams.n_layer) { + case 3: + model.type = e_model::MODEL_17M; break; // bge-micro + case 6: + model.type = e_model::MODEL_22M; break; // MiniLM-L6 + case 12: + switch (hparams.n_embd) { + case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small + case 768: model.type = e_model::MODEL_109M; break; // bge-base + 
default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 24: + model.type = e_model::MODEL_335M; break; // bge-large + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_JINA_BERT_V2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); + hparams.f_max_alibi_bias = 8.0f; + + switch (hparams.n_layer) { + case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small + case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_NOMIC_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type); + + if (hparams.n_layer == 12 && hparams.n_embd == 768) { + model.type = e_model::MODEL_137M; + } + } break; + case LLM_ARCH_BLOOM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 30: + switch (hparams.n_embd) { + case 2560: model.type = e_model::MODEL_3B; break; + case 4096: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + // TODO: become GGUF KV parameter + hparams.f_max_alibi_bias = 8.0f; + } break; + case LLM_ARCH_MPT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false); + ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 48: model.type = e_model::MODEL_30B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_STABLELM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + case 40: model.type = e_model::MODEL_12B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_QWEN: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_QWEN2VL: + { + ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true); + } + // fall through + case LLM_ARCH_QWEN2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break; + case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 36: model.type = e_model::MODEL_3B; break; + case 40: model.type = hparams.n_head() == 20 ? 
e_model::MODEL_4B : e_model::MODEL_13B; break; + case 48: model.type = e_model::MODEL_14B; break; + case 64: model.type = e_model::MODEL_32B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_QWEN2MOE: + { + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false); + + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_A2_7B; break; + case 28: model.type = e_model::MODEL_57B_A14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_PHI2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_PHI3: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + case 40: model.type = e_model::MODEL_14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931 + if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) { + // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct + hparams.n_swa = 2047; + } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) { + // default value for Phi-3-mini-128k-instruct + hparams.n_swa = 262144; + } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) { + // default value for Phi-3-medium-128k-instruct + hparams.n_swa = 131072; + } + bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + if (!found_swa && hparams.n_swa == 0) { + throw std::runtime_error("invalid value for sliding_window"); + } + } break; + case LLM_ARCH_PLAMO: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GPT2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 12: model.type = e_model::MODEL_SMALL; break; + case 24: model.type = e_model::MODEL_MEDIUM; break; + case 36: model.type = e_model::MODEL_LARGE; break; + case 48: model.type = e_model::MODEL_XL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_CODESHELL: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 42: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_ORION: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_INTERNLM2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 48: model.type = e_model::MODEL_20B; break; + default: model.type = 
e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GEMMA: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 18: model.type = e_model::MODEL_2B; break; + case 28: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GEMMA2: + { + hparams.n_swa = 4096; // default value of gemma 2 + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false); + ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false); + hparams.attn_soft_cap = true; + + switch (hparams.n_layer) { + case 26: model.type = e_model::MODEL_2B; break; + case 42: model.type = e_model::MODEL_9B; break; + case 46: model.type = e_model::MODEL_27B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_STARCODER2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 30: model.type = e_model::MODEL_3B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_15B; break; + case 52: model.type = e_model::MODEL_20B; break; // granite + case 88: model.type = e_model::MODEL_34B; break; // granite + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_MAMBA: + { + ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv); + ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner); + ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state); + ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank); + ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false); + + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 24: + switch (hparams.n_embd) { + case 768: model.type = e_model::MODEL_SMALL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 48: + switch (hparams.n_embd) { + case 1024: model.type = e_model::MODEL_MEDIUM; break; + case 1536: model.type = e_model::MODEL_LARGE; break; + case 2048: model.type = e_model::MODEL_XL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 64: + switch (hparams.n_embd) { + case 2560: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_XVERSE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + case 80: model.type = e_model::MODEL_65B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_COMMAND_R: + { + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_35B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_DBRX: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_16x12B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OLMO: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, 
hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false); + + switch (hparams.n_layer) { + case 22: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OLMO2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OLMOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_A1_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OPENELM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_270M; break; + case 20: model.type = e_model::MODEL_450M; break; + case 28: model.type = e_model::MODEL_1B; break; + case 36: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GPTNEOX: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res); + switch (hparams.n_layer) { + case 6: + switch (hparams.n_ff()) { + case 512: model.type = e_model::MODEL_14M; break; + case 2048: model.type = e_model::MODEL_70M; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 12: + switch (hparams.n_ff()) { + case 3072: model.type = e_model::MODEL_160M; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 16: + switch (hparams.n_ff()) { + case 8192: model.type = e_model::MODEL_1B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 24: + switch (hparams.n_ff()) { + case 4096: model.type = e_model::MODEL_410M; break; + case 8192: model.type = e_model::MODEL_1_4B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 32: + switch (hparams.n_ff()) { + case 10240: model.type = e_model::MODEL_2_8B; break; + case 16384: model.type = e_model::MODEL_6_9B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 36: + switch (hparams.n_ff()) { + case 20480: model.type = e_model::MODEL_12B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 44: + switch (hparams.n_ff()) { + case 24576: model.type = e_model::MODEL_20B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_ARCTIC: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + if (hparams.n_expert == 128) { + switch (hparams.n_layer) { + case 35: model.type = e_model::MODEL_10B_128x3_66B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } else { + model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_DEEPSEEK: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + + switch 
(hparams.n_layer) { + case 28: model.type = e_model::MODEL_20B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_DEEPSEEK2: + { + bool is_lite = (hparams.n_layer == 27); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + if (!is_lite) { + ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); + } + ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul); + + switch (hparams.n_layer) { + case 27: model.type = e_model::MODEL_16B; break; + case 60: model.type = e_model::MODEL_236B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_CHATGLM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 28: model.type = e_model::MODEL_6B; break; + case 40: model.type = e_model::MODEL_9B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_BITNET: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 26: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_T5: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts); + + uint32_t dec_start_token_id; + if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) { + hparams.dec_start_token_id = dec_start_token_id; + } + + switch (hparams.n_layer) { + case 6: model.type = e_model::MODEL_60M; break; // t5-small + case 8: model.type = e_model::MODEL_80M; break; // flan-t5-small + case 12: + switch (hparams.n_ff()) { + case 3072: model.type = e_model::MODEL_220M; break; // t5-base + case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 24: + switch (hparams.n_ff()) { + case 4096: model.type = e_model::MODEL_770M; break; // t5-large + case 2816: model.type = e_model::MODEL_780M; break; // flan-t5-large + case 16384: model.type = e_model::MODEL_3B; break; // t5-3b + case 5120: model.type = e_model::MODEL_3B; break; // flan-t5-xl + case 65536: model.type = e_model::MODEL_11B; break; // t5-11b + case 10240: model.type = e_model::MODEL_11B; break; // flan-t5-xxl + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_T5ENCODER: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts); + model.type = e_model::MODEL_UNKNOWN; + } break; + case LLM_ARCH_JAIS: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1_3B; break; + case 40: model.type = e_model::MODEL_13B; break; + /* TODO: add variants */ + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_NEMOTRON: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch 
(hparams.n_layer) { + case 32: model.type = e_model::MODEL_4B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_EXAONE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_8B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_RWKV6: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size); + ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim); + ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim); + ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1_6B; break; + case 32: + switch (hparams.n_embd) { + case 2560: model.type = e_model::MODEL_3B; break; + case 4096: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 61: model.type = e_model::MODEL_14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GRANITE: + case LLM_ARCH_GRANITE_MOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_3B; break; + case 40: model.type = e_model::MODEL_3B; break; + // Add additional layer/vocab/etc checks here for other model sizes + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_CHAMELEON: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + hparams.f_norm_eps = 1e-5; // eps for qk-norm, torch default + ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 48: model.type = e_model::MODEL_34B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_WAVTOKENIZER_DEC: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS, hparams.f_norm_group_eps); + ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + } break; + default: throw std::runtime_error("unsupported model architecture"); + } + + model.ftype = ml.ftype; + + if (hparams.f_max_alibi_bias > 0.0f) { + hparams.use_alibi = true; + } + + hparams.rope_type = llama_rope_type(&model); +} + +void llm_load_vocab(llama_model_loader & ml, llama_model & model) { + auto & vocab = model.vocab; + + struct gguf_context * ctx = ml.meta.get(); + + const auto kv = LLM_KV(model.arch); + + // determine vocab type + { + std::string tokenizer_model; + std::string tokenizer_pre; + + ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model); + ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false); + + if (tokenizer_model == "no_vocab" || tokenizer_model == "none") { + vocab.type = LLAMA_VOCAB_TYPE_NONE; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = LLAMA_TOKEN_NULL; + vocab.special_unk_id = LLAMA_TOKEN_NULL; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + vocab.special_cls_id = 
LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + vocab.linefeed_id = LLAMA_TOKEN_NULL; + + // read vocab size from metadata + if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) { + vocab.n_vocab = 0; + LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab); + } + return; + } + + if (tokenizer_model == "llama") { + vocab.type = LLAMA_VOCAB_TYPE_SPM; + + // default special tokens + vocab.special_bos_id = 1; + vocab.special_eos_id = 2; + vocab.special_unk_id = 0; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + } else if (tokenizer_model == "bert") { + vocab.type = LLAMA_VOCAB_TYPE_WPM; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = LLAMA_TOKEN_NULL; + vocab.special_unk_id = 100; + vocab.special_sep_id = 102; + vocab.special_pad_id = 0; + vocab.special_cls_id = 101; + vocab.special_mask_id = 103; + } else if (tokenizer_model == "gpt2") { + vocab.type = LLAMA_VOCAB_TYPE_BPE; + + // read bpe merges and populate bpe ranks + const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str()); + if (merges_keyidx == -1) { + throw std::runtime_error("cannot find tokenizer merges in model file\n"); + } + + const int n_merges = gguf_get_arr_n(ctx, merges_keyidx); + for (int i = 0; i < n_merges; i++) { + const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i); + GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0); + + std::string first; + std::string second; + + const size_t pos = word.find(' ', 1); + + if (pos != std::string::npos) { + first = word.substr(0, pos); + second = word.substr(pos + 1); + } + + vocab.bpe_ranks.emplace(std::make_pair(first, second), i); + } + + // default special tokens + vocab.special_bos_id = 11; + vocab.special_eos_id = 11; + vocab.special_unk_id = LLAMA_TOKEN_NULL; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + } else if (tokenizer_model == "t5") { + vocab.type = LLAMA_VOCAB_TYPE_UGM; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = 1; + vocab.special_unk_id = 2; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = 0; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + + const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str()); + if (precompiled_charsmap_keyidx != -1) { + size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx); + const char * precompiled_charsmap = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx); + vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap); +#ifdef IS_BIG_ENDIAN + // correct endiannes of data in precompiled_charsmap binary blob + uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0]; + *xcda_blob_size = __builtin_bswap32(*xcda_blob_size); + assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap); + size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t); + uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)]; + for (size_t i = 0; i < xcda_array_size; ++i) { + xcda_array[i] = __builtin_bswap32(xcda_array[i]); + } +#endif + } + } else 
if (tokenizer_model == "rwkv") { + vocab.type = LLAMA_VOCAB_TYPE_RWKV; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = LLAMA_TOKEN_NULL; + vocab.special_unk_id = LLAMA_TOKEN_NULL; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + } else { + throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str())); + } + + // for now, only BPE models have pre-tokenizers + if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = true; + if (tokenizer_pre.empty()) { + LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__); + LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__); + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if (tokenizer_pre == "default") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if ( + tokenizer_pre == "llama3" || + tokenizer_pre == "llama-v3" || + tokenizer_pre == "llama-bpe"|| + tokenizer_pre == "falcon3") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3; + vocab.tokenizer_ignore_merges = true; + vocab.tokenizer_add_bos = true; + } else if ( + tokenizer_pre == "deepseek-llm") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "deepseek-coder") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "falcon") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON; + } else if ( + tokenizer_pre == "mpt") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT; + } else if ( + tokenizer_pre == "starcoder") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER; + } else if ( + tokenizer_pre == "gpt-2" || + tokenizer_pre == "phi-2" || + tokenizer_pre == "jina-es" || + tokenizer_pre == "jina-de" || + tokenizer_pre == "gigachat" || + tokenizer_pre == "jina-v1-en" || + tokenizer_pre == "jina-v2-es" || + tokenizer_pre == "jina-v2-de" || + tokenizer_pre == "jina-v2-code" || + tokenizer_pre == "roberta-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2; + } else if ( + tokenizer_pre == "refact") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT; + } else if ( + tokenizer_pre == "command-r") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "qwen2") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "stablelm2") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2; + } else if ( + tokenizer_pre == "olmo") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO; + } else if ( + tokenizer_pre == "dbrx") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; + } else if ( + tokenizer_pre == "smaug-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG; + } else if ( + tokenizer_pre == "poro-chat") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "chatglm-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4; + vocab.special_bos_id = LLAMA_TOKEN_NULL; + } else if ( + tokenizer_pre == "viking") { + vocab.type_pre = 
LLAMA_VOCAB_PRE_TYPE_VIKING; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "jais") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; + } else if ( + tokenizer_pre == "tekken") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN; + vocab.tokenizer_clean_spaces = false; + vocab.tokenizer_ignore_merges = true; + vocab.tokenizer_add_bos = true; + } else if ( + tokenizer_pre == "smollm") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "codeshell") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL; + } else if ( + tokenizer_pre == "bloom") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM; + } else if ( + tokenizer_pre == "gpt3-finnish") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH; + } else if ( + tokenizer_pre == "exaone") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE; + } else if ( + tokenizer_pre == "chameleon") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON; + vocab.tokenizer_add_bos = true; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "minerva-7b") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA; + } else if ( + tokenizer_pre == "megrez") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; + } else { + throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); + } + } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_space_prefix = true; + vocab.tokenizer_clean_spaces = false; + vocab.tokenizer_add_bos = true; + vocab.tokenizer_add_eos = false; + } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = true; + vocab.tokenizer_add_bos = true; + vocab.tokenizer_add_eos = false; + } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_bos = false; + vocab.tokenizer_add_eos = true; + } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = false; + vocab.tokenizer_add_bos = false; + vocab.tokenizer_add_eos = false; + } else { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } + + ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.tokenizer_add_space_prefix, false); + ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false); + } + + const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str()); + if (token_idx == -1) { + throw std::runtime_error("cannot find tokenizer vocab in model file\n"); + } + + const float * scores = nullptr; + const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str()); + if (score_idx != -1) { + scores = (const float * ) gguf_get_arr_data(ctx, score_idx); + } + + const int * toktypes = nullptr; + const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str()); + if (toktype_idx != -1) { + toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx); + } + + const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx); + + vocab.n_vocab = n_vocab; + vocab.id_to_token.resize(n_vocab); + + for (uint32_t i = 0; i < n_vocab; i++) { + std::string word = gguf_get_arr_str(ctx, token_idx, i); + if (word.empty()) { + LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i); + word = "[EMPTY_" + std::to_string(i) + "]"; + } + + vocab.token_to_id[word] = i; + 
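+        // both directions of the mapping are kept: token_to_id for encoding lookups,
+        // id_to_token (filled below) for decoding; max_token_len tracks the longest token text,
+        // presumably so the tokenizers can bound their longest-match search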
vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size()); + + auto & token_data = vocab.id_to_token[i]; + token_data.text = std::move(word); + token_data.score = scores ? scores[i] : 0.0f; + token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; + + if (toktypes) { //TODO: remove, required until per token attributes are available from GGUF file + switch(toktypes[i]) { + case LLAMA_TOKEN_TYPE_UNKNOWN: token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN; break; + case LLAMA_TOKEN_TYPE_UNUSED: token_data.attr = LLAMA_TOKEN_ATTR_UNUSED; break; + case LLAMA_TOKEN_TYPE_NORMAL: token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; break; + case LLAMA_TOKEN_TYPE_CONTROL: token_data.attr = LLAMA_TOKEN_ATTR_CONTROL; break; + case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break; + case LLAMA_TOKEN_TYPE_BYTE: token_data.attr = LLAMA_TOKEN_ATTR_BYTE; break; + case LLAMA_TOKEN_TYPE_UNDEFINED: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break; + default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break; + } + } + } + GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size()); + + vocab.init_tokenizer(); + + // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n' + if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { + try { + vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n'); + } catch (const std::exception & e) { + LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what()); + vocab.linefeed_id = vocab.special_pad_id; + } + } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) { + vocab.linefeed_id = vocab.special_pad_id; + } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) { + const std::vector ids = llama_tokenize_internal(vocab, "\n", false); + GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); + vocab.linefeed_id = ids[0]; + } else { + const std::vector ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A + + //GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); + if (ids.empty()) { + LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__); + vocab.linefeed_id = vocab.special_pad_id; + } else { + vocab.linefeed_id = ids[0]; + } + } + + // special tokens + { + const std::vector> special_token_types = { + { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id }, + { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id }, + { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id }, + { LLM_KV_TOKENIZER_EOM_ID, vocab.special_eom_id }, + { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id }, + { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id }, + { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id }, + { LLM_KV_TOKENIZER_CLS_ID, vocab.special_cls_id }, + { LLM_KV_TOKENIZER_MASK_ID, vocab.special_mask_id }, + { LLM_KV_TOKENIZER_FIM_PRE_ID, vocab.special_fim_pre_id }, + { LLM_KV_TOKENIZER_FIM_SUF_ID, vocab.special_fim_suf_id }, + { LLM_KV_TOKENIZER_FIM_MID_ID, vocab.special_fim_mid_id }, + { LLM_KV_TOKENIZER_FIM_PAD_ID, vocab.special_fim_pad_id }, + { LLM_KV_TOKENIZER_FIM_REP_ID, vocab.special_fim_rep_id }, + { LLM_KV_TOKENIZER_FIM_SEP_ID, vocab.special_fim_sep_id }, + + // deprecated + { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_fim_pre_id }, + { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_fim_suf_id }, + { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_fim_mid_id }, + }; + + for (const auto & it : special_token_types) { + const std::string & key = kv(std::get<0>(it)); + int32_t & id = std::get<1>(it); + + uint32_t new_id; + if 
(!ml.get_key(std::get<0>(it), new_id, false)) { + continue; + } + if (new_id >= vocab.id_to_token.size()) { + LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n", + __func__, key.c_str(), new_id, id); + } else { + id = new_id; + } + } + + // Handle add_bos_token and add_eos_token + { + bool temp = true; + + if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) { + vocab.tokenizer_add_bos = temp; + } + if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) { + vocab.tokenizer_add_eos = temp; + } + } + + // auto-detect special tokens by text + // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_... + // for now, we apply this workaround to find the tokens based on their text + + for (const auto & t : vocab.token_to_id) { + // find EOT token: "<|eot_id|>", "<|im_end|>", "", etc. + if (vocab.special_eot_id == LLAMA_TOKEN_NULL) { + if (false + || t.first == "<|eot_id|>" + || t.first == "<|im_end|>" + || t.first == "<|end|>" + || t.first == "" + || t.first == "<|endoftext|>" + || t.first == "" + || t.first == "<|end▁of▁sentence|>" // DeepSeek + ) { + vocab.special_eot_id = t.second; + if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { + LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n", + __func__, t.second, t.first.c_str()); + vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL; + } + } + } + + // find EOM token: "<|eom_id|>" + if (vocab.special_eom_id == LLAMA_TOKEN_NULL) { + if (false + || t.first == "<|eom_id|>" + ) { + vocab.special_eom_id = t.second; + if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { + LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n", + __func__, t.second, t.first.c_str()); + vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL; + } + } + } + + // find FIM_PRE token: "<|fim_prefix|>", "", "
", etc.
+            if (vocab.special_fim_pre_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_prefix|>"  // Qwen
+                        || t.first == "<fim-prefix>"
+                        || t.first == "<|fim▁begin|>" // DeepSeek
+                        || t.first == "<PRE>"
+                        ) {
+                    vocab.special_fim_pre_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_SUF token: "<|fim_suffix|>", "<fim-suffix>", "<SUF>", etc.
+            if (vocab.special_fim_suf_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_suffix|>" // Qwen
+                        || t.first == "<fim-suffix>"
+                        || t.first == "<|fim▁hole|>" // DeepSeek
+                        || t.first == "<SUF>"
+                        ) {
+                    vocab.special_fim_suf_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_MID token: "<|fim_middle|>", "<fim-middle>", "<MID>", etc.
+            if (vocab.special_fim_mid_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_middle|>" // Qwen
+                        || t.first == "<fim-middle>"
+                        || t.first == "<|fim▁end|>"  // DeepSeek
+                        || t.first == "<MID>"
+                        ) {
+                    vocab.special_fim_mid_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_PAD token: "<|fim_pad|>", "<fim-pad>", "<PAD>", etc.
+            if (vocab.special_fim_pad_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_pad|>" // Qwen
+                        || t.first == "<fim-pad>"
+                        || t.first == "<PAD>"
+                        ) {
+                    vocab.special_fim_pad_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_REP token: "<|fim_repo|>", "<fim-repo>", "<REPO>", etc.
+            if (vocab.special_fim_rep_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_repo|>"  // Qwen
+                        || t.first == "<|repo_name|>"
+                        || t.first == "<fim-repo>"
+                        || t.first == "<REPO>"
+                        ) {
+                    vocab.special_fim_rep_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_SEP token: "<|file_sep|>"
+            if (vocab.special_fim_sep_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|file_sep|>" // Qwen
+                        ) {
+                    vocab.special_fim_sep_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+        }
+
+        // maintain a list of tokens that cause end-of-generation
+        // this is currently determined based on the token text, which is obviously not ideal
+        // ref: https://github.com/ggerganov/llama.cpp/issues/9606
+        vocab.special_eog_ids.clear();
+
+        if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_pad_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_fim_pad_id);
+        }
+
+        if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_rep_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_fim_rep_id);
+        }
+
+        if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_sep_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_fim_sep_id);
+        }
+
+        for (const auto & t : vocab.token_to_id) {
+            if (false
+                    || t.first == "<|eot_id|>"
+                    || t.first == "<|im_end|>"
+                    || t.first == "<|end|>"
+                    || t.first == "<end_of_turn>"
+                    || t.first == "<|endoftext|>"
+                    || t.first == "<|eom_id|>"
+                    || t.first == "<EOT>"
+               ) {
+                vocab.special_eog_ids.insert(t.second);
+                if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                    LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                            __func__, t.second, t.first.c_str());
+                    vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                }
+            } else {
+                // token is control, but not marked as EOG -> print a debug log
+                if (vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && vocab.special_eog_ids.count(t.second) == 0) {
+                    LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
+                            __func__, t.second, t.first.c_str());
+                }
+            }
+        }
+
+        // sanity checks
+        if (vocab.special_eos_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_eos_id);
+            LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+
+        if (vocab.special_eot_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_eot_id);
+            LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+
+        if (vocab.special_eom_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_eom_id);
+            LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+    }
+
+    // build special tokens cache
+    {
+        for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
+            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
+                vocab.cache_special_tokens.push_back(id);
+            }
+        }
+
+        std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
+            [&] (const llama_vocab::id a, const llama_vocab::id b) {
+                return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
+            }
+        );
+
+        LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
+    }
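
The cache above is deliberately sorted by descending token-text length, so that a scanner which tries the cached special tokens in order always prefers the longest candidate (e.g. a long "<|...|>" marker over a shorter token that happens to be its prefix). A minimal standalone sketch of that greedy lookup; find_special_prefix and its pair-based container are illustrative and not part of the patch:

    #include <string>
    #include <utility>
    #include <vector>

    // return the id of the longest special token that prefixes `text`, or -1 if none matches.
    // assumes `specials` holds {id, text} pairs already sorted by descending text length,
    // mirroring cache_special_tokens above (illustrative helper only).
    static int find_special_prefix(const std::string & text,
                                   const std::vector<std::pair<int, std::string>> & specials) {
        for (const auto & s : specials) {
            if (text.compare(0, s.second.size(), s.second) == 0) {
                return s.first; // first hit is the longest match thanks to the sort order
            }
        }
        return -1;
    }
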
+
+    // build token to piece cache
+    {
+        size_t size_cache = 0;
+
+        std::vector<std::string> cache_token_to_piece(n_vocab);
+
+        for (uint32_t id = 0; id < n_vocab; ++id) {
+            cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
+
+            size_cache += cache_token_to_piece[id].size();
+        }
+
+        std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
+
+        LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
+    }
+
+    // Handle per token attributes
+    //NOTE: Each model customizes per token attributes.
+    //NOTE: Per token attributes are missing from the GGUF file.
+    //TODO: Extract attributes from GGUF file.
+    {
+        auto _contains_any = [] (const std::string &str, const std::vector<std::string> &substrs) -> bool {
+            for (auto substr : substrs) {
+                if (str.find(substr) < std::string::npos) {
+                    return true;
+                }
+            }
+            return false;
+        };
+
+        auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
+            uint32_t current = vocab.id_to_token.at(id).attr;
+            current = value ? (current | attr) : (current & ~attr);
+            vocab.id_to_token[id].attr = (llama_token_attr) current;
+        };
+
+        auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
+            _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
+        };
+
+        std::string model_name;
+        std::string tokenizer_pre;
+
+        ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
+        ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
+
+        // model name to lowercase
+        std::transform(model_name.begin(), model_name.end(), model_name.begin(),
+            [] (const std::string::value_type x) {
+                return std::tolower(x);
+            }
+        );
+
+        // set attributes by model/tokenizer name
+        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
+            _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
+        } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
+            for (auto id : vocab.cache_special_tokens) {
+                _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
+            }
+            for (auto token : {"</s>"}) {
+                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
+            }
+            for (auto token : {"<unk>", "<s>", "<|endoftext|>"}) {
+                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
+            }
+        }
+    }
+}
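
The _set_tokenid_attr/_set_token_attr helpers above reduce to a single bit set/clear on the token's attribute word. A tiny self-contained sketch of the same pattern; set_attr_bit is illustrative, and the LLAMA_TOKEN_ATTR_* names mentioned in the comment are the public flags from llama.h:

    #include <cstdint>

    // set or clear one attribute bit in a token's flag word -- the same pattern
    // used by _set_tokenid_attr above (illustrative helper, not part of the patch)
    static uint32_t set_attr_bit(uint32_t attrs, uint32_t attr, bool value) {
        return value ? (attrs | attr) : (attrs & ~attr);
    }

    // e.g. set_attr_bit(attrs, LLAMA_TOKEN_ATTR_RSTRIP, true)  adds the RSTRIP bit,
    //      set_attr_bit(attrs, LLAMA_TOKEN_ATTR_RSTRIP, false) removes it again
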
+
+void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
+    const auto & hparams = model.hparams;
+    const auto & vocab   = model.vocab;
+
+    const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
+
+    auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
+        bool is_var = false;
+
+        std::vector<uint32_t> v;
+        for (uint32_t i = 0; i < n; ++i) {
+            v.push_back(f(i));
+            if (v[i] != v[0]) {
+                is_var = true;
+            }
+        }
+
+        std::stringstream ss;
+
+        if (is_var) {
+            ss << "[";
+            for (uint32_t i = 0; i < n; ++i) {
+                ss << v[i];
+                if (i < n - 1) {
+                    ss << ", ";
+                }
+            }
+            ss << "]";
+        } else {
+            ss << v[0];
+        }
+
+        return ss.str();
+    };
+
+    // hparams
+    LLAMA_LOG_INFO("%s: format           = %s\n",     __func__, llama_file_version_name(ml.fver));
+    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, llm_arch_name(model.arch));
+    LLAMA_LOG_INFO("%s: vocab type       = %s\n",     __func__, llama_model_vocab_type_name(vocab.type));
+    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",     __func__, hparams.n_vocab);
+    LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (int) vocab.bpe_ranks.size());
+    LLAMA_LOG_INFO("%s: vocab_only       = %d\n",     __func__, hparams.vocab_only);
+
+    if (!hparams.vocab_only) {
+        LLAMA_LOG_INFO("%s: n_ctx_train      = %u\n",     __func__, hparams.n_ctx_train);
+        LLAMA_LOG_INFO("%s: n_embd           = %u\n",     __func__, hparams.n_embd);
+        LLAMA_LOG_INFO("%s: n_layer          = %u\n",     __func__, hparams.n_layer);
+        LLAMA_LOG_INFO("%s: n_head           = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head(il);    }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_head_kv        = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_rot            = %u\n",     __func__, hparams.n_rot);
+        LLAMA_LOG_INFO("%s: n_swa            = %u\n",     __func__, hparams.n_swa);
+        LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n",     __func__, hparams.n_embd_head_k);
+        LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n",     __func__, hparams.n_embd_head_v);
+        LLAMA_LOG_INFO("%s: n_gqa            = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il);        }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_embd_k_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_embd_v_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: f_norm_eps       = %.1e\n",   __func__, hparams.f_norm_eps);
+        LLAMA_LOG_INFO("%s: f_norm_rms_eps   = %.1e\n",   __func__, hparams.f_norm_rms_eps);
+        LLAMA_LOG_INFO("%s: f_clamp_kqv      = %.1e\n",   __func__, hparams.f_clamp_kqv);
+        LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n",   __func__, hparams.f_max_alibi_bias);
+        LLAMA_LOG_INFO("%s: f_logit_scale    = %.1e\n",   __func__, hparams.f_logit_scale);
+        LLAMA_LOG_INFO("%s: n_ff             = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_expert         = %u\n",     __func__, hparams.n_expert);
+        LLAMA_LOG_INFO("%s: n_expert_used    = %u\n",     __func__, hparams.n_expert_used);
+        LLAMA_LOG_INFO("%s: causal attn      = %d\n",     __func__, hparams.causal_attn);
+        LLAMA_LOG_INFO("%s: pooling type     = %d\n",     __func__, hparams.pooling_type);
+        LLAMA_LOG_INFO("%s: rope type        = %d\n",     __func__, hparams.rope_type);
+        LLAMA_LOG_INFO("%s: rope scaling     = %s\n",     __func__, rope_scaling_type);
+        LLAMA_LOG_INFO("%s: freq_base_train  = %.1f\n",   __func__, hparams.rope_freq_base_train);
+        LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",     __func__, hparams.rope_freq_scale_train);
+        LLAMA_LOG_INFO("%s: n_ctx_orig_yarn  = %u\n",     __func__, hparams.n_ctx_orig_yarn);
+        LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
+        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
+        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
+        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
+        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
+        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
+    }
+
+    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, llama_model_type_name(model).c_str());
+    LLAMA_LOG_INFO("%s: model ftype      = %s\n",     __func__, llama_model_ftype_name(model).c_str());
+    if (ml.n_elements >= 1e12) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
+    } else if (ml.n_elements >= 1e9) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
+    } else if (ml.n_elements >= 1e6) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
+    } else {
+        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
+    }
+    if (ml.n_bytes < GiB) {
+        LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0,        ml.n_bytes*8.0/ml.n_elements);
+    } else {
+        LLAMA_LOG_INFO("%s: model size       = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+    }
+
+    // general kv
+    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
+
+    // special tokens
+    if (vocab.special_bos_id  != -1)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
+    if (vocab.special_eos_id  != -1)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
+    if (vocab.special_eot_id  != -1)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
+    if (vocab.special_eom_id  != -1)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
+    if (vocab.special_unk_id  != -1)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
+    if (vocab.special_sep_id  != -1)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
+    if (vocab.special_pad_id  != -1)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
+    if (vocab.special_cls_id  != -1)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
+    if (vocab.special_mask_id != -1)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
+
+    if (vocab.linefeed_id != -1)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+
+    if (vocab.special_fim_pre_id != -1) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
+    if (vocab.special_fim_suf_id != -1) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
+    if (vocab.special_fim_mid_id != -1) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
+    if (vocab.special_fim_pad_id != -1) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
+    if (vocab.special_fim_rep_id != -1) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
+    if (vocab.special_fim_sep_id != -1) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
+
+    for (const auto & id : vocab.special_eog_ids) {
+        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
+    }
+
+    LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
+
+    if (model.arch == LLM_ARCH_DEEPSEEK) {
+        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
+        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
+        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
+        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
+    }
+
+    if (model.arch == LLM_ARCH_DEEPSEEK2) {
+        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
+        LLAMA_LOG_INFO("%s: n_lora_q             = %d\n",     __func__, hparams.n_lora_q);
+        LLAMA_LOG_INFO("%s: n_lora_kv            = %d\n",     __func__, hparams.n_lora_kv);
+        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
+        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
+        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
+        LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
+    }
+
+    if (model.arch == LLM_ARCH_QWEN2MOE) {
+        LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
+        LLAMA_LOG_INFO("%s: n_ff_shexp       = %d\n",     __func__, hparams.n_ff_shexp);
+    }
+
+    if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
+        LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
+        LLAMA_LOG_INFO("%s: f_residual_scale  = %f\n", __func__, hparams.f_residual_scale);
+        LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
+    }
+}
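
print_f condenses a per-layer hyperparameter into a single number when it is constant across layers and into a bracketed list otherwise. A self-contained sketch with the same behavior, using made-up values rather than llama_hparams:

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <sstream>
    #include <string>
    #include <vector>

    // same idea as print_f above: "32" if all per-layer values match, "[32, 16, 8, 4]" otherwise
    static std::string summarize(const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
        std::vector<uint32_t> v;
        bool is_var = false;
        for (uint32_t i = 0; i < n; ++i) {
            v.push_back(f(i));
            if (v[i] != v[0]) is_var = true;
        }
        std::stringstream ss;
        if (!is_var) { ss << v[0]; return ss.str(); }
        ss << "[";
        for (uint32_t i = 0; i < n; ++i) { ss << v[i] << (i + 1 < n ? ", " : ""); }
        ss << "]";
        return ss.str();
    }

    int main() {
        printf("%s\n", summarize([](uint32_t)   { return 32u;      }, 4).c_str()); // prints: 32
        printf("%s\n", summarize([](uint32_t i) { return 32u >> i; }, 4).c_str()); // prints: [32, 16, 8, 4]
    }
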
+
+//
+// interface implementation
+//
+
+struct llama_model_params llama_model_default_params() {
+    struct llama_model_params result = {
+        /*.devices                     =*/ nullptr,
+        /*.n_gpu_layers                =*/ 0,
+        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
+        /*.main_gpu                    =*/ 0,
+        /*.tensor_split                =*/ nullptr,
+        /*.rpc_servers                 =*/ nullptr,
+        /*.progress_callback           =*/ nullptr,
+        /*.progress_callback_user_data =*/ nullptr,
+        /*.kv_overrides                =*/ nullptr,
+        /*.vocab_only                  =*/ false,
+        /*.use_mmap                    =*/ true,
+        /*.use_mlock                   =*/ false,
+        /*.check_tensors               =*/ false,
+    };
+
+#ifdef GGML_USE_METAL
+    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
+    result.n_gpu_layers = 999;
+#endif
+
+    return result;
+}
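
A typical caller starts from these defaults and overrides only the fields it needs. A minimal usage sketch; the model path is a placeholder, and llama_load_model_from_file is assumed to still be the public loader declared in llama.h at this point in the series:

    #include "llama.h"

    int main() {
        llama_backend_init();

        llama_model_params mparams = llama_model_default_params();
        mparams.n_gpu_layers = 32;    // offload 32 layers if a GPU backend is available
        mparams.use_mmap     = true;  // default value, shown explicitly for illustration

        // "model.gguf" is a placeholder path
        llama_model * model = llama_load_model_from_file("model.gguf", mparams);
        if (model) {
            llama_free_model(model);
        }

        llama_backend_free();
        return 0;
    }
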
+
+void llama_free_model(struct llama_model * model) {
+    delete model;
+}
+
+enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
+    return model->vocab.type;
+}
+
+int32_t llama_n_vocab(const struct llama_model * model) {
+    return model->hparams.n_vocab;
+}
+
+int32_t llama_n_ctx_train(const struct llama_model * model) {
+    return model->hparams.n_ctx_train;
+}
+
+int32_t llama_n_embd(const struct llama_model * model) {
+    return model->hparams.n_embd;
+}
+
+int32_t llama_n_layer(const struct llama_model * model) {
+    return model->hparams.n_layer;
+}
+
+int32_t llama_n_head(const struct llama_model * model) {
+    return model->hparams.n_head();
+}
+
+enum llama_rope_type llama_rope_type(const struct llama_model * model) {
+    switch (model->arch) {
+        // these models do not use RoPE
+        case LLM_ARCH_GPT2:
+        case LLM_ARCH_GPTJ:
+        case LLM_ARCH_MPT:
+        case LLM_ARCH_REFACT:
+        case LLM_ARCH_BLOOM:
+        case LLM_ARCH_MAMBA:
+        case LLM_ARCH_JINA_BERT_V2:
+        case LLM_ARCH_T5:
+        case LLM_ARCH_T5ENCODER:
+        case LLM_ARCH_JAIS:
+        case LLM_ARCH_RWKV6:
+        case LLM_ARCH_WAVTOKENIZER_DEC:
+            return LLAMA_ROPE_TYPE_NONE;
+
+        // use what we call a normal RoPE, operating on pairs of consecutive head values
+        case LLM_ARCH_LLAMA:
+        case LLM_ARCH_DECI:
+        case LLM_ARCH_BAICHUAN:
+        case LLM_ARCH_STARCODER:
+        case LLM_ARCH_PLAMO:
+        case LLM_ARCH_ORION:
+        case LLM_ARCH_INTERNLM2:
+        case LLM_ARCH_MINICPM:
+        case LLM_ARCH_XVERSE:
+        case LLM_ARCH_COMMAND_R:
+        case LLM_ARCH_OLMO:
+        case LLM_ARCH_ARCTIC:
+        case LLM_ARCH_DEEPSEEK:
+        case LLM_ARCH_DEEPSEEK2:
+        case LLM_ARCH_CHATGLM:
+        case LLM_ARCH_GRANITE:
+        case LLM_ARCH_GRANITE_MOE:
+        case LLM_ARCH_CHAMELEON:
+            return LLAMA_ROPE_TYPE_NORM;
+
+        // the pairs of head values are offset by n_rot/2
+        case LLM_ARCH_FALCON:
+        case LLM_ARCH_GROK:
+        case LLM_ARCH_DBRX:
+        case LLM_ARCH_BERT:
+        case LLM_ARCH_NOMIC_BERT:
+        case LLM_ARCH_STABLELM:
+        case LLM_ARCH_BITNET:
+        case LLM_ARCH_QWEN:
+        case LLM_ARCH_QWEN2:
+        case LLM_ARCH_QWEN2MOE:
+        case LLM_ARCH_OLMO2:
+        case LLM_ARCH_OLMOE:
+        case LLM_ARCH_PHI2:
+        case LLM_ARCH_PHI3:
+        case LLM_ARCH_GEMMA:
+        case LLM_ARCH_GEMMA2:
+        case LLM_ARCH_STARCODER2:
+        case LLM_ARCH_OPENELM:
+        case LLM_ARCH_GPTNEOX:
+        case LLM_ARCH_CODESHELL:
+        case LLM_ARCH_NEMOTRON:
+        case LLM_ARCH_EXAONE:
+        case LLM_ARCH_MINICPM3:
+            return LLAMA_ROPE_TYPE_NEOX;
+
+        case LLM_ARCH_QWEN2VL:
+            return LLAMA_ROPE_TYPE_MROPE;
+
+        // all model arches should be listed explicitly here
+        case LLM_ARCH_UNKNOWN:
+            GGML_ABORT("unknown architecture");
+    }
+
+    return LLAMA_ROPE_TYPE_NONE;
+}
+
+float llama_rope_freq_scale_train(const struct llama_model * model) {
+    return model->hparams.rope_freq_scale_train;
+}
+
+int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
+    const auto & it = model->gguf_kv.find(key);
+    if (it == model->gguf_kv.end()) {
+        if (buf_size > 0) {
+            buf[0] = '\0';
+        }
+        return -1;
+    }
+    return snprintf(buf, buf_size, "%s", it->second.c_str());
+}
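
The return value follows the snprintf convention: the length the value would need (so the output may be truncated to buf_size), or -1 if the key is absent. A short usage sketch; "general.architecture" is just a representative GGUF key:

    #include "llama.h"
    #include <cstdio>

    // print one metadata value by key
    static void print_meta(const llama_model * model) {
        char buf[128];
        const int32_t n = llama_model_meta_val_str(model, "general.architecture", buf, sizeof(buf));
        if (n < 0) {
            printf("key not present\n");
        } else {
            printf("general.architecture = %s%s\n", buf, (size_t) n >= sizeof(buf) ? " (truncated)" : "");
        }
    }
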
+
+int32_t llama_model_meta_count(const struct llama_model * model) {
+    return (int)model->gguf_kv.size();
+}
+
+int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
+    if (i < 0 || i >= (int)model->gguf_kv.size()) {
+        if (buf_size > 0) {
+            buf[0] = '\0';
+        }
+        return -1;
+    }
+    auto it = model->gguf_kv.begin();
+    std::advance(it, i);
+    return snprintf(buf, buf_size, "%s", it->first.c_str());
+}
+
+int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
+    if (i < 0 || i >= (int)model->gguf_kv.size()) {
+        if (buf_size > 0) {
+            buf[0] = '\0';
+        }
+        return -1;
+    }
+    auto it = model->gguf_kv.begin();
+    std::advance(it, i);
+    return snprintf(buf, buf_size, "%s", it->second.c_str());
+}
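
Together with llama_model_meta_count, the two index-based accessors above make it easy to dump every GGUF key/value pair of a loaded model; a small sketch:

    #include "llama.h"
    #include <cstdio>

    // dump all GGUF key/value metadata of a loaded model
    static void dump_meta(const llama_model * model) {
        const int32_t n_kv = llama_model_meta_count(model);
        for (int32_t i = 0; i < n_kv; ++i) {
            char key[256];
            char val[256];
            llama_model_meta_key_by_index    (model, i, key, sizeof(key));
            llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
            printf("%s = %s\n", key, val); // long values are truncated to the buffer size
        }
    }
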
+
+int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
+    return snprintf(buf, buf_size, "%s %s %s",
+            llama_model_arch_name (*model).c_str(),
+            llama_model_type_name (*model).c_str(),
+            llama_model_ftype_name(*model).c_str());
+}
+
+uint64_t llama_model_size(const struct llama_model * model) {
+    return model->n_bytes;
+}
+
+uint64_t llama_model_n_params(const struct llama_model * model) {
+    return model->n_elements;
+}
+
+bool llama_model_has_encoder(const struct llama_model * model) {
+    switch (model->arch) {
+        case LLM_ARCH_T5:        return true;
+        case LLM_ARCH_T5ENCODER: return true;
+        default:                 return false;
+    }
+}
+
+bool llama_model_has_decoder(const struct llama_model * model) {
+    switch (model->arch) {
+        case LLM_ARCH_T5ENCODER: return false;
+        default:                 return true;
+    }
+}
+
+llama_token llama_model_decoder_start_token(const struct llama_model * model) {
+    return model->hparams.dec_start_token_id;
+}
+
+bool llama_model_is_recurrent(const struct llama_model * model) {
+    switch (model->arch) {
+        case LLM_ARCH_MAMBA:  return true;
+        case LLM_ARCH_RWKV6:  return true;
+        default:              return false;
+    }
+}
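
These accessors let callers branch on model structure without touching llama_model internals. An illustrative sketch that uses only functions defined in this file:

    #include "llama.h"
    #include <cstdio>

    // report a few structural properties using the accessors above
    static void describe(const llama_model * model) {
        printf("embedding size : %d\n", llama_n_embd(model));
        printf("layers         : %d\n", llama_n_layer(model));
        printf("has encoder    : %s\n", llama_model_has_encoder(model)  ? "yes" : "no");
        printf("recurrent      : %s\n", llama_model_is_recurrent(model) ? "yes" : "no");

        if (llama_model_has_encoder(model) && !llama_model_has_decoder(model)) {
            // e.g. T5ENCODER: only encoding applies, there is no decoder start token
            printf("encoder-only model\n");
        }
    }
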
diff --git a/src/llama-model.h b/src/llama-model.h
new file mode 100644
index 000000000..01c780c41
--- /dev/null
+++ b/src/llama-model.h
@@ -0,0 +1,389 @@
+#pragma once
+
+#include "llama.h"
+#include "llama-arch.h"
+#include "llama-hparams.h"
+#include "llama-vocab.h"
+#include "llama-mmap.h"
+
+#include "ggml-cpp.h"
+
+#include <vector>
+
+// available models
+// TODO: this enum does not follow the enum naming convention
+enum llm_type {
+    MODEL_UNKNOWN,
+    MODEL_14M,
+    MODEL_17M,
+    MODEL_22M,
+    MODEL_33M,
+    MODEL_60M,
+    MODEL_70M,
+    MODEL_80M,
+    MODEL_109M,
+    MODEL_137M,
+    MODEL_160M,
+    MODEL_220M,
+    MODEL_250M,
+    MODEL_270M,
+    MODEL_335M,
+    MODEL_410M,
+    MODEL_450M,
+    MODEL_770M,
+    MODEL_780M,
+    MODEL_0_5B,
+    MODEL_1B,
+    MODEL_1_3B,
+    MODEL_1_4B,
+    MODEL_1_5B,
+    MODEL_1_6B,
+    MODEL_2B,
+    MODEL_2_8B,
+    MODEL_3B,
+    MODEL_4B,
+    MODEL_6B,
+    MODEL_6_9B,
+    MODEL_7B,
+    MODEL_8B,
+    MODEL_9B,
+    MODEL_11B,
+    MODEL_12B,
+    MODEL_13B,
+    MODEL_14B,
+    MODEL_15B,
+    MODEL_16B,
+    MODEL_20B,
+    MODEL_30B,
+    MODEL_32B,
+    MODEL_34B,
+    MODEL_35B,
+    MODEL_40B,
+    MODEL_65B,
+    MODEL_70B,
+    MODEL_236B,
+    MODEL_314B,
+    MODEL_SMALL,
+    MODEL_MEDIUM,
+    MODEL_LARGE,
+    MODEL_XL,
+    MODEL_A1_7B,
+    MODEL_A2_7B,
+    MODEL_8x7B,
+    MODEL_8x22B,
+    MODEL_16x12B,
+    MODEL_10B_128x3_66B,
+    MODEL_57B_A14B,
+    MODEL_27B,
+};
+
+struct llama_layer_posnet {
+    // resnet
+    struct ggml_tensor * norm1   = nullptr;
+    struct ggml_tensor * norm1_b = nullptr;
+
+    struct ggml_tensor * conv1   = nullptr;
+    struct ggml_tensor * conv1_b = nullptr;
+
+    struct ggml_tensor * norm2   = nullptr;
+    struct ggml_tensor * norm2_b = nullptr;
+
+    struct ggml_tensor * conv2   = nullptr;
+    struct ggml_tensor * conv2_b = nullptr;
+
+    // attention
+    struct ggml_tensor * attn_norm   = nullptr;
+    struct ggml_tensor * attn_norm_b = nullptr;
+
+    struct ggml_tensor * attn_q   = nullptr;
+    struct ggml_tensor * attn_q_b = nullptr;
+
+    struct ggml_tensor * attn_k   = nullptr;
+    struct ggml_tensor * attn_k_b = nullptr;
+
+    struct ggml_tensor * attn_v   = nullptr;
+    struct ggml_tensor * attn_v_b = nullptr;
+
+    struct ggml_tensor * attn_o   = nullptr;
+    struct ggml_tensor * attn_o_b = nullptr;
+
+    // normalize
+    struct ggml_tensor * norm   = nullptr;
+    struct ggml_tensor * norm_b = nullptr;
+};
+
+struct llama_layer_convnext {
+    struct ggml_tensor * dw   = nullptr;
+    struct ggml_tensor * dw_b = nullptr;
+
+    struct ggml_tensor * norm   = nullptr;
+    struct ggml_tensor * norm_b = nullptr;
+
+    struct ggml_tensor * pw1   = nullptr;
+    struct ggml_tensor * pw1_b = nullptr;
+
+    struct ggml_tensor * pw2   = nullptr;
+    struct ggml_tensor * pw2_b = nullptr;
+
+    struct ggml_tensor * gamma = nullptr;
+};
+
+struct llama_layer {
+    // normalization
+    struct ggml_tensor * attn_norm       = nullptr;
+    struct ggml_tensor * attn_norm_b     = nullptr;
+    struct ggml_tensor * attn_norm_2     = nullptr;
+    struct ggml_tensor * attn_norm_2_b   = nullptr;
+    struct ggml_tensor * attn_q_norm     = nullptr;
+    struct ggml_tensor * attn_q_norm_b   = nullptr;
+    struct ggml_tensor * attn_k_norm     = nullptr;
+    struct ggml_tensor * attn_k_norm_b   = nullptr;
+    struct ggml_tensor * attn_out_norm   = nullptr;
+    struct ggml_tensor * attn_out_norm_b = nullptr;
+    struct ggml_tensor * attn_q_a_norm   = nullptr;
+    struct ggml_tensor * attn_kv_a_norm  = nullptr;
+    struct ggml_tensor * attn_sub_norm   = nullptr;
+    struct ggml_tensor * attn_post_norm  = nullptr;
+    struct ggml_tensor * ffn_sub_norm    = nullptr;
+    struct ggml_tensor * attn_norm_cross = nullptr;
+    struct ggml_tensor * attn_norm_enc   = nullptr;
+
+    // attention
+    struct ggml_tensor * wq        = nullptr;
+    struct ggml_tensor * wk        = nullptr;
+    struct ggml_tensor * wv        = nullptr;
+    struct ggml_tensor * wo        = nullptr;
+    struct ggml_tensor * wqkv      = nullptr;
+    struct ggml_tensor * wq_a      = nullptr;
+    struct ggml_tensor * wq_b      = nullptr;
+    struct ggml_tensor * wkv_a_mqa = nullptr;
+    struct ggml_tensor * wkv_b     = nullptr;
+    struct ggml_tensor * wq_cross  = nullptr;
+    struct ggml_tensor * wk_cross  = nullptr;
+    struct ggml_tensor * wv_cross  = nullptr;
+    struct ggml_tensor * wo_cross  = nullptr;
+    struct ggml_tensor * wq_enc    = nullptr;
+    struct ggml_tensor * wk_enc    = nullptr;
+    struct ggml_tensor * wv_enc    = nullptr;
+    struct ggml_tensor * wo_enc    = nullptr;
+
+    // attention bias
+    struct ggml_tensor * bq   = nullptr;
+    struct ggml_tensor * bk   = nullptr;
+    struct ggml_tensor * bv   = nullptr;
+    struct ggml_tensor * bo   = nullptr;
+    struct ggml_tensor * bqkv = nullptr;
+
+    // relative position bias
+    struct ggml_tensor * attn_rel_b       = nullptr;
+    struct ggml_tensor * attn_rel_b_enc   = nullptr;
+    struct ggml_tensor * attn_rel_b_cross = nullptr;
+
+    // normalization
+    struct ggml_tensor * ffn_norm         = nullptr;
+    struct ggml_tensor * ffn_norm_b       = nullptr;
+    struct ggml_tensor * ffn_post_norm    = nullptr;
+    struct ggml_tensor * layer_out_norm   = nullptr;
+    struct ggml_tensor * layer_out_norm_b = nullptr;
+    struct ggml_tensor * ffn_norm_exps    = nullptr;
+    struct ggml_tensor * ffn_norm_enc     = nullptr;
+
+    // ff
+    struct ggml_tensor * ffn_gate     = nullptr; // w1
+    struct ggml_tensor * ffn_down     = nullptr; // w2
+    struct ggml_tensor * ffn_up       = nullptr; // w3
+    struct ggml_tensor * ffn_gate_enc = nullptr;
+    struct ggml_tensor * ffn_down_enc = nullptr;
+    struct ggml_tensor * ffn_up_enc   = nullptr;
+
+    // ff MoE
+    struct ggml_tensor * ffn_gate_inp  = nullptr;
+    struct ggml_tensor * ffn_gate_exps = nullptr;
+    struct ggml_tensor * ffn_down_exps = nullptr;
+    struct ggml_tensor * ffn_up_exps   = nullptr;
+
+    // ff shared expert (shexp)
+    struct ggml_tensor * ffn_gate_inp_shexp = nullptr;
+    struct ggml_tensor * ffn_gate_shexp     = nullptr;
+    struct ggml_tensor * ffn_down_shexp     = nullptr;
+    struct ggml_tensor * ffn_up_shexp       = nullptr;
+
+    // ff bias
+    struct ggml_tensor * ffn_gate_b = nullptr;
+    struct ggml_tensor * ffn_down_b = nullptr; // b2
+    struct ggml_tensor * ffn_up_b   = nullptr; // b3
+    struct ggml_tensor * ffn_act    = nullptr;
+
+    // mamba proj
+    struct ggml_tensor * ssm_in  = nullptr;
+    struct ggml_tensor * ssm_x   = nullptr;
+    struct ggml_tensor * ssm_dt  = nullptr;
+    struct ggml_tensor * ssm_out = nullptr;
+
+    // mamba
+    struct ggml_tensor * ssm_conv1d = nullptr;
+    struct ggml_tensor * ssm_a      = nullptr;
+    struct ggml_tensor * ssm_d      = nullptr;
+
+    // mamba bias
+    struct ggml_tensor * ssm_conv1d_b = nullptr;
+    struct ggml_tensor * ssm_dt_b     = nullptr;
+
+    // rwkv
+    struct ggml_tensor * time_mix_w1         = nullptr;
+    struct ggml_tensor * time_mix_w2         = nullptr;
+    struct ggml_tensor * time_mix_lerp_x     = nullptr;
+    struct ggml_tensor * time_mix_lerp_w     = nullptr;
+    struct ggml_tensor * time_mix_lerp_k     = nullptr;
+    struct ggml_tensor * time_mix_lerp_v     = nullptr;
+    struct ggml_tensor * time_mix_lerp_r     = nullptr;
+    struct ggml_tensor * time_mix_lerp_g     = nullptr;
+
+    struct ggml_tensor * time_mix_first      = nullptr;
+    struct ggml_tensor * time_mix_decay      = nullptr;
+    struct ggml_tensor * time_mix_decay_w1   = nullptr;
+    struct ggml_tensor * time_mix_decay_w2   = nullptr;
+    struct ggml_tensor * time_mix_key        = nullptr;
+    struct ggml_tensor * time_mix_value      = nullptr;
+    struct ggml_tensor * time_mix_receptance = nullptr;
+    struct ggml_tensor * time_mix_gate       = nullptr;
+
+    struct ggml_tensor * time_mix_ln     = nullptr;
+    struct ggml_tensor * time_mix_ln_b   = nullptr;
+    struct ggml_tensor * time_mix_output = nullptr;
+
+    struct ggml_tensor * channel_mix_lerp_k = nullptr;
+    struct ggml_tensor * channel_mix_lerp_r = nullptr;
+
+    struct ggml_tensor * channel_mix_key        = nullptr;
+    struct ggml_tensor * channel_mix_receptance = nullptr;
+    struct ggml_tensor * channel_mix_value      = nullptr;
+
+    // long rope factors
+    struct ggml_tensor * rope_long  = nullptr;
+    struct ggml_tensor * rope_short = nullptr;
+    struct ggml_tensor * rope_freqs = nullptr;
+
+    // bitnet scale
+    struct ggml_tensor * wq_scale       = nullptr;
+    struct ggml_tensor * wk_scale       = nullptr;
+    struct ggml_tensor * wv_scale       = nullptr;
+    struct ggml_tensor * wo_scale       = nullptr;
+    struct ggml_tensor * ffn_gate_scale = nullptr;
+    struct ggml_tensor * ffn_up_scale   = nullptr;
+    struct ggml_tensor * ffn_down_scale = nullptr;
+
+    struct llama_layer_posnet posnet;
+
+    struct llama_layer_convnext convnext;
+};
+
+struct llama_model {
+    llm_type type = MODEL_UNKNOWN;
+    llm_arch arch = LLM_ARCH_UNKNOWN;
+
+    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
+
+    std::string name = "n/a";
+
+    llama_hparams hparams = {};
+    llama_vocab   vocab;
+
+    struct ggml_tensor * tok_embd   = nullptr;
+    struct ggml_tensor * type_embd  = nullptr;
+    struct ggml_tensor * pos_embd   = nullptr;
+    struct ggml_tensor * tok_norm   = nullptr;
+    struct ggml_tensor * tok_norm_b = nullptr;
+
+    struct ggml_tensor * output_norm     = nullptr;
+    struct ggml_tensor * output_norm_b   = nullptr;
+    struct ggml_tensor * output          = nullptr;
+    struct ggml_tensor * output_b        = nullptr;
+    struct ggml_tensor * output_norm_enc = nullptr;
+
+    // classifier
+    struct ggml_tensor * cls       = nullptr;
+    struct ggml_tensor * cls_b     = nullptr;
+    struct ggml_tensor * cls_out   = nullptr;
+    struct ggml_tensor * cls_out_b = nullptr;
+
+    struct ggml_tensor * conv1d   = nullptr;
+    struct ggml_tensor * conv1d_b = nullptr;
+
+    std::vector<llama_layer> layers;
+
+    // gguf metadata
+    std::unordered_map<std::string, std::string> gguf_kv;
+
+    llama_split_mode split_mode;
+    int main_gpu;
+    int n_gpu_layers;
+
+    std::vector<std::string> rpc_servers;
+
+    // list of devices used in this model
+    std::vector<ggml_backend_dev_t> devices;
+
+
+    // lists of buffer types used for each layer
+    using buft_list_t = std::vector<std::pair<ggml_backend_dev_t, ggml_backend_buffer_type_t>>;
+    buft_list_t cpu_buft_list;
+    std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
+
+    struct layer_dev {
+        ggml_backend_dev_t dev;
+        buft_list_t * buft_list;
+    };
+
+    layer_dev dev_input = {};
+    layer_dev dev_output = {};
+    std::vector<layer_dev> dev_layer;
+
+    // contexts where the model tensors metadata is stored
+    std::vector<ggml_context_ptr> ctxs;
+
+    // the model memory buffers for the tensor data
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    // model memory mapped files
+    llama_mmaps mappings;
+
+    // objects representing data potentially being locked in memory
+    llama_mlocks mlock_bufs;
+    llama_mlocks mlock_mmaps;
+
+    // for quantize-stats only
+    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
+
+    int64_t t_load_us  = 0;
+    int64_t t_start_us = 0;
+
+    // total number of parameters in the model
+    uint64_t n_elements = 0;
+
+    // total size of all the tensors in the model in bytes
+    size_t  n_bytes     = 0;
+};
+
+const char * llm_type_name(llm_type type);
+
+std::string llama_model_arch_name (const llama_model & model);
+std::string llama_model_type_name (const llama_model & model);
+std::string llama_model_ftype_name(const llama_model & model);
+
+// used by llama_adapter_cvec
+ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il);
+
+// used by llama_adapter_lora
+struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name);
+
+size_t llama_model_max_nodes(const llama_model & model);
+
+struct llama_model_loader;
+
+// TODO: become llama_model methods
+void llm_load_stats     (llama_model_loader & ml, llama_model & model);
+void llm_load_arch      (llama_model_loader & ml, llama_model & model);
+void llm_load_hparams   (llama_model_loader & ml, llama_model & model);
+void llm_load_vocab     (llama_model_loader & ml, llama_model & model);
+void llm_load_print_meta(llama_model_loader & ml, llama_model & model);
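
llama_model_get_tensor is declared above, but its definition is not shown in this excerpt. One plausible implementation, sketched here under the assumption that it simply scans the (name, tensor) pairs recorded in tensors_by_name during loading:

    #include "llama-model.h"

    #include <algorithm>

    // assumed implementation: linear scan over tensors_by_name
    struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name) {
        auto it = std::find_if(model.tensors_by_name.begin(), model.tensors_by_name.end(),
                [name](const std::pair<std::string, struct ggml_tensor *> & entry) {
                    return entry.first == name;
                });
        if (it == model.tensors_by_name.end()) {
            return nullptr;
        }
        return it->second;
    }
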
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
new file mode 100644
index 000000000..42974f8f1
--- /dev/null
+++ b/src/llama-quant.cpp
@@ -0,0 +1,929 @@
+#include "llama-quant.h"
+
+#include "llama-impl.h"
+#include "llama-model.h"
+#include "llama-model-loader.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <fstream>
+#include <mutex>
+#include <thread>
+#include <unordered_map>
+
+// TODO: replace with ggml API call
+#define QK_K 256
+
+static void zeros(std::ofstream & file, size_t n) {
+    char zero = 0;
+    for (size_t i = 0; i < n; ++i) {
+        file.write(&zero, 1);
+    }
+}
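
zeros() is typically used to pad the output file up to an alignment boundary before the next tensor is written. A small sketch; pad_to_alignment is a hypothetical helper placed in the same translation unit as zeros(), and the alignment value comes from the file format rather than from this function:

    // pad `file`, currently at byte offset `pos`, up to the next multiple of `align`
    // and return the new offset
    static size_t pad_to_alignment(std::ofstream & file, size_t pos, size_t align) {
        const size_t padded = (pos + align - 1) / align * align;
        zeros(file, padded - pos); // write the zero bytes needed to reach the boundary
        return padded;
    }
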
+
+struct quantize_state_internal {
+    const llama_model                 & model;
+    const llama_model_quantize_params * params;
+
+    int n_attention_wv = 0;
+    int n_ffn_down     = 0;
+    int n_ffn_gate     = 0;
+    int n_ffn_up       = 0;
+    int i_attention_wv = 0;
+    int i_ffn_down     = 0;
+    int i_ffn_gate     = 0;
+    int i_ffn_up       = 0;
+
+    int n_k_quantized = 0;
+    int n_fallback    = 0;
+
+    bool has_imatrix = false;
+
+    // used to figure out if a model shares tok_embd with the output weight
+    bool has_output = false;
+
+    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
+        : model(model)
+        , params(params)
+        {}
+};
+
+static void llama_tensor_dequantize_internal(
+    struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
+    const size_t nelements, const int nthread
+) {
+    if (output.size() < nelements) {
+        output.resize(nelements);
+    }
+    float * f32_output = (float *) output.data();
+
+    const ggml_type_traits * qtype = ggml_get_type_traits(tensor->type);
+    if (ggml_is_quantized(tensor->type)) {
+        if (qtype->to_float == NULL) {
+            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
+        }
+    } else if (tensor->type != GGML_TYPE_F16 &&
+               tensor->type != GGML_TYPE_BF16) {
+        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
+    }
+
+    if (nthread < 2) {
+        if (tensor->type == GGML_TYPE_F16) {
+            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
+        } else if (tensor->type == GGML_TYPE_BF16) {
+            ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
+        } else if (ggml_is_quantized(tensor->type)) {
+            qtype->to_float(tensor->data, f32_output, nelements);
+        } else {
+            GGML_ABORT("fatal error"); // unreachable
+        }
+        return;
+    }
+
+    size_t block_size;
+    if (tensor->type == GGML_TYPE_F16 ||
+        tensor->type == GGML_TYPE_BF16) {
+        block_size = 1;
+    } else {
+        block_size = (size_t)ggml_blck_size(tensor->type);
+    }
+
+    size_t block_size_bytes = ggml_type_size(tensor->type);
+
+    GGML_ASSERT(nelements % block_size == 0);
+    size_t nblocks = nelements / block_size;
+    size_t blocks_per_thread = nblocks / nthread;
+    size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
+
+    size_t in_buff_offs = 0;
+    size_t out_buff_offs = 0;
+
+    for (int tnum = 0; tnum < nthread; tnum++) {
+        size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
+        size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
+        size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
+
+        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
+            if (typ == GGML_TYPE_F16) {
+                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
+            } else if (typ == GGML_TYPE_BF16) {
+                ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
+            } else {
+                qtype->to_float(inbuf, outbuf, nels);
+            }
+        };
+        workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
+        in_buff_offs += thr_block_bytes;
+        out_buff_offs += thr_elems;
+    }
+    for (auto & w : workers) { w.join(); }
+    workers.clear();
+}
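
The loop above divides the tensor's quantization blocks evenly across the worker threads and hands any remainder to the last one. The same arithmetic in isolation, as a standalone sketch:

    #include <cstddef>
    #include <cstdio>

    // split `nblocks` quantization blocks across `nthread` workers; the last worker
    // also takes the remainder, matching the partitioning used above
    static void plan_threads(size_t nblocks, int nthread) {
        const size_t per_thread = nblocks / nthread;
        const size_t spare      = nblocks - per_thread * nthread;
        for (int t = 0; t < nthread; ++t) {
            const size_t n = per_thread + (t == nthread - 1 ? spare : 0);
            printf("thread %d: %zu blocks\n", t, n);
        }
    }

    // plan_threads(1001, 8): threads 0..6 get 125 blocks each, thread 7 gets 126
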
+
+static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+    const std::string name = ggml_get_name(tensor);
+
+    // TODO: avoid hardcoded tensor names - use the TN_* constants
+    const llm_arch arch = qs.model.arch;
+    const auto       tn = LLM_TN(arch);
+
+    auto use_more_bits = [](int i_layer, int n_layers) -> bool {
+        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
+    };
+    const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
+    auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
+        if (n_expert > 1) {
+            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
+            // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
+            // for getting the current layer as I initially thought, and we need to resort to parsing the
+            // tensor name.
+            if (sscanf(name, "blk.%d.", &i_layer) != 1) {
+                throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
+            }
+            if (i_layer < 0 || i_layer >= n_layer) {
+                throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
+            }
+        }
+        return std::make_pair(i_layer, n_layer);
+    };
+
+    // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
+    // with the quantization of the output tensor
+    if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
+        if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
+            new_type = qs.params->output_tensor_type;
+        } else {
+            int nx = tensor->ne[0];
+            if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
+                new_type = GGML_TYPE_Q8_0;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
+                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M   ||
+                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+                new_type = GGML_TYPE_Q5_K;
+            }
+            else if (new_type != GGML_TYPE_Q8_0) {
+                new_type = GGML_TYPE_Q6_K;
+            }
+        }
+    } else if (name == "token_embd.weight") {
+        if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
+            new_type = qs.params->token_embedding_type;
+        } else {
+            if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
+                ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+                new_type = GGML_TYPE_Q2_K;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
+                new_type = GGML_TYPE_IQ3_S;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+                new_type = GGML_TYPE_IQ3_S;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+        }
+    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
+               ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M    || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+        if (name.find("attn_v.weight") != std::string::npos) {
+            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
+            else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            ++qs.i_attention_wv;
+        }
+        else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (name.find("ffn_down") != std::string::npos) {
+            if (qs.i_ffn_down < qs.n_ffn_down/8) {
+                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            }
+            ++qs.i_ffn_down;
+        }
+        else if (name.find("attn_output.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert == 8) {
+                new_type = GGML_TYPE_Q5_K;
+            } else {
+                if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
+            }
+        }
+    } else if (name.find("attn_v.weight") != std::string::npos) {
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
+        }
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+            new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q5_K;
+        }
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
+                use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
+        if (qs.model.type == MODEL_70B) {
+            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
+            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
+            // nearly negligible increase in model size by quantizing this tensor with more bits:
+            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+        }
+        if (qs.model.hparams.n_expert == 8) {
+            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+            // TODO: explore better strategies
+            new_type = GGML_TYPE_Q8_0;
+        }
+        ++qs.i_attention_wv;
+    } else if (name.find("attn_k.weight") != std::string::npos) {
+        if (qs.model.hparams.n_expert == 8) {
+            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+            // TODO: explore better strategies
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+            new_type = GGML_TYPE_IQ2_S;
+        }
+    } else if (name.find("attn_q.weight") != std::string::npos) {
+        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+            new_type = GGML_TYPE_IQ2_S;
+        }
+    } else if (name.find("ffn_down") != std::string::npos) {
+        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
+        int i_layer = info.first, n_layer = info.second;
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
+            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
+            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+            new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
+                     : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
+                     : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
+                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+            new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
+            if (arch == LLM_ARCH_FALCON) {
+                new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
+                           use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+            } else {
+                if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
+            }
+        }
+        else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
+            new_type = GGML_TYPE_Q5_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
+            new_type = GGML_TYPE_Q5_K;
+        }
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
+                && qs.has_imatrix && i_layer < n_layer/8) {
+            // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
+            // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
+            // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
+            new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
+        }
+        ++qs.i_ffn_down;
+    } else if (name.find("attn_output.weight") != std::string::npos) {
+        if (arch != LLM_ARCH_FALCON) {
+            if (qs.model.hparams.n_expert == 8) {
+                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
+                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL  ||
+                    ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S  ||
+                    ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
+                    new_type = GGML_TYPE_Q5_K;
+                }
+            } else {
+                if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   ) new_type = GGML_TYPE_Q3_K;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  ) new_type = GGML_TYPE_Q4_K;
+            }
+        } else {
+            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
+        }
+    }
+    else if (name.find("attn_qkv.weight") != std::string::npos) {
+        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+    }
+    else if (name.find("ffn_gate") != std::string::npos) {
+        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
+        int i_layer = info.first, n_layer = info.second;
+        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        ++qs.i_ffn_gate;
+    }
+    else if (name.find("ffn_up") != std::string::npos) {
+        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
+        int i_layer = info.first, n_layer = info.second;
+        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        ++qs.i_ffn_up;
+    }
+
+    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+    //}
+    // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
+    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
+    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+    //}
+    // This can be used to reduce the size of the Q5_K_S model.
+    // The associated PPL increase is fully in line with the size reduction
+    //else {
+    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
+    //}
+    bool convert_incompatible_tensor = false;
+    if (new_type == GGML_TYPE_Q2_K    || new_type == GGML_TYPE_Q3_K    || new_type == GGML_TYPE_Q4_K   ||
+        new_type == GGML_TYPE_Q5_K    || new_type == GGML_TYPE_Q6_K    || new_type == GGML_TYPE_IQ4_XS ||
+        new_type == GGML_TYPE_IQ2_XS  || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S  ||
+        new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S   || new_type == GGML_TYPE_IQ3_S  ||
+        new_type == GGML_TYPE_IQ1_M) {
+        int nx = tensor->ne[0];
+        int ny = tensor->ne[1];
+        if (nx % QK_K != 0) {
+            LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
+            convert_incompatible_tensor = true;
+        } else {
+            ++qs.n_k_quantized;
+        }
+    }
+    if (convert_incompatible_tensor) {
+        switch (new_type) {
+            case GGML_TYPE_TQ1_0:
+            case GGML_TYPE_TQ2_0:  new_type = GGML_TYPE_Q4_0; break;  // TODO: use a symmetric type instead
+            case GGML_TYPE_IQ2_XXS:
+            case GGML_TYPE_IQ2_XS:
+            case GGML_TYPE_IQ2_S:
+            case GGML_TYPE_IQ3_XXS:
+            case GGML_TYPE_IQ3_S:
+            case GGML_TYPE_IQ1_S:
+            case GGML_TYPE_IQ1_M:
+            case GGML_TYPE_Q2_K:
+            case GGML_TYPE_Q3_K:
+            case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
+            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
+            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
+            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
+            default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
+        }
+        if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
+            new_type = GGML_TYPE_F16;
+        }
+        LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
+        ++qs.n_fallback;
+    }
+
+    return new_type;
+}
+
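The divisibility check at the end of llama_tensor_get_type is the core of the fallback rule: k- and i-quants pack each row into super-blocks of QK_K (256) values, so a row length that is not a multiple of 256 forces a type whose block size does divide the row. Below is a self-contained sketch of that rule with a deliberately reduced, illustrative type table (not the full mapping used above):

#include <cstdio>

// hypothetical stand-ins for ggml_type, for illustration only
enum fake_type { FT_Q4_K, FT_Q5_0, FT_F16 };

static fake_type pick_type(int n_cols) {
    const int QK_K  = 256; // super-block size of the k-quants
    const int QK5_0 = 32;  // block size of the legacy Q5_0 format
    if (n_cols % QK_K  == 0) return FT_Q4_K; // preferred k-quant
    if (n_cols % QK5_0 == 0) return FT_Q5_0; // fallback quant
    return FT_F16;                           // last resort: keep the tensor unquantized
}

int main() {
    printf("%d\n", pick_type(4096)); // multiple of 256        -> FT_Q4_K (0)
    printf("%d\n", pick_type(4160)); // multiple of 32 only    -> FT_Q5_0 (1)
    printf("%d\n", pick_type(100));  // neither                -> FT_F16  (2)
    return 0;
}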
+static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+    if (nthread < 2) {
+        // single-thread
+        size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
+        if (!ggml_validate_row_data(new_type, new_data, new_size)) {
+            throw std::runtime_error("quantized data validation failed");
+        }
+        return new_size;
+    }
+
+    std::mutex mutex;
+    int64_t counter = 0;
+    size_t new_size = 0;
+    bool valid = true;
+    auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
+            nrows, n_per_row, imatrix]() {
+        const int64_t nrows_per_chunk = chunk_size / n_per_row;
+        size_t local_size = 0;
+        while (true) {
+            std::unique_lock<std::mutex> lock(mutex);
+            int64_t first_row = counter; counter += nrows_per_chunk;
+            if (first_row >= nrows) {
+                if (local_size > 0) {
+                    new_size += local_size;
+                }
+                break;
+            }
+            lock.unlock();
+            const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
+            size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
+            local_size += this_size;
+
+            // validate the quantized data
+            const size_t row_size  = ggml_row_size(new_type, n_per_row);
+            void * this_data = (char *) new_data + first_row * row_size;
+            if (!ggml_validate_row_data(new_type, this_data, this_size)) {
+                std::unique_lock<std::mutex> lock(mutex);
+                valid = false;
+                break;
+            }
+        }
+    };
+    for (int it = 0; it < nthread - 1; ++it) {
+        workers.emplace_back(compute);
+    }
+    compute();
+    for (auto & w : workers) { w.join(); }
+    workers.clear();
+    if (!valid) {
+        throw std::runtime_error("quantized data validation failed");
+    }
+    return new_size;
+}
+
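The multi-threaded path above hands out work by letting every thread claim the next chunk of rows from a shared counter under a mutex. A minimal, self-contained sketch of that dispatch pattern (the quantization call itself is elided; all names are illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

int main() {
    const int64_t nrows          = 1000;
    const int64_t rows_per_chunk = 64;  // analogous to chunk_size / n_per_row
    const int     nthread        = 4;

    std::mutex mutex;
    int64_t counter   = 0; // next row to hand out
    int64_t rows_done = 0;

    auto compute = [&]() {
        while (true) {
            int64_t first_row;
            {
                std::lock_guard<std::mutex> lock(mutex);
                first_row = counter;
                counter  += rows_per_chunk;
            }
            if (first_row >= nrows) {
                break;
            }
            const int64_t this_nrow = std::min(nrows - first_row, rows_per_chunk);
            // ... quantize rows [first_row, first_row + this_nrow) here ...
            {
                std::lock_guard<std::mutex> lock(mutex);
                rows_done += this_nrow;
            }
        }
    };

    std::vector<std::thread> workers;
    for (int i = 0; i < nthread - 1; ++i) {
        workers.emplace_back(compute);
    }
    compute(); // the calling thread participates as well
    for (auto & w : workers) {
        w.join();
    }

    printf("rows processed: %lld / %lld\n", (long long) rows_done, (long long) nrows);
    return 0;
}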
+static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
+    ggml_type default_type;
+    llama_ftype ftype = params->ftype;
+
+    switch (params->ftype) {
+        case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
+        case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
+        case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
+        case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
+        case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
+        case LLAMA_FTYPE_MOSTLY_F16:  default_type = GGML_TYPE_F16;  break;
+        case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
+        case LLAMA_FTYPE_ALL_F32:     default_type = GGML_TYPE_F32;  break;
+
+        // K-quants
+        case LLAMA_FTYPE_MOSTLY_Q2_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q2_K:    default_type = GGML_TYPE_Q2_K;    break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_XS:  default_type = GGML_TYPE_IQ3_S;   break;
+        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
+        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  default_type = GGML_TYPE_Q3_K;    break;
+        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  default_type = GGML_TYPE_Q4_K;    break;
+        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  default_type = GGML_TYPE_Q5_K;    break;
+        case LLAMA_FTYPE_MOSTLY_Q6_K:    default_type = GGML_TYPE_Q6_K;    break;
+        case LLAMA_FTYPE_MOSTLY_TQ1_0:   default_type = GGML_TYPE_TQ1_0;   break;
+        case LLAMA_FTYPE_MOSTLY_TQ2_0:   default_type = GGML_TYPE_TQ2_0;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  default_type = GGML_TYPE_IQ2_XS;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_S:   default_type = GGML_TYPE_IQ2_XS;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_M:   default_type = GGML_TYPE_IQ2_S;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
+        case LLAMA_FTYPE_MOSTLY_IQ1_S:   default_type = GGML_TYPE_IQ1_S;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ1_M:   default_type = GGML_TYPE_IQ1_M;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ4_NL:  default_type = GGML_TYPE_IQ4_NL;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  default_type = GGML_TYPE_IQ4_XS;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_S:   default_type = GGML_TYPE_IQ3_S;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_M:   default_type = GGML_TYPE_IQ3_S;   break;
+
+        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
+    }
+
+    int nthread = params->nthread;
+
+    if (nthread <= 0) {
+        nthread = std::thread::hardware_concurrency();
+    }
+
+    // mmap consistently increases speed on Linux, and also increases speed on Windows with a
+    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
+#if defined(__linux__) || defined(_WIN32)
+    constexpr bool use_mmap = true;
+#else
+    constexpr bool use_mmap = false;
+#endif
+
+    llama_model_kv_override * kv_overrides = nullptr;
+    if (params->kv_overrides) {
+        auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
+        kv_overrides = v->data();
+    }
+    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
+    ml.init_mappings(false); // no prefetching
+
+    llama_model model;
+    llm_load_arch   (ml, model);
+    llm_load_hparams(ml, model);
+    llm_load_stats  (ml, model);
+
+    struct quantize_state_internal qs(model, params);
+
+    if (params->only_copy) {
+        ftype = model.ftype;
+    }
+    const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
+    if (params->imatrix) {
+        imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
+        if (imatrix_data) {
+            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
+            qs.has_imatrix = true;
+            // check imatrix for nans or infs
+            for (const auto & kv : *imatrix_data) {
+                for (float f : kv.second) {
+                    if (!std::isfinite(f)) {
+                        throw std::runtime_error(format("imatrix contains non-finite value %f\n", f));
+                    }
+                }
+            }
+        }
+    }
+
+    const size_t align = GGUF_DEFAULT_ALIGNMENT;
+    gguf_context_ptr ctx_out { gguf_init_empty() };
+
+    // copy the KV pairs from the input file
+    gguf_set_kv     (ctx_out.get(), ml.meta.get());
+    gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
+    gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
+
+    // Remove split metadata
+    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
+    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
+    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());
+
+    if (params->kv_overrides) {
+        const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
+        for (const auto & o : overrides) {
+            if (o.key[0] == 0) break;
+            if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
+                gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
+                gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64);
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
+                gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
+                gguf_set_val_str(ctx_out.get(), o.key, o.val_str);
+            } else {
+                LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
+            }
+        }
+    }
+
+    // make a list of weights
+    std::vector<const llama_model_loader::llama_tensor_weight *> tensors;
+    tensors.reserve(ml.weights_map.size());
+    for (const auto & it : ml.weights_map) {
+        tensors.push_back(&it.second);
+    }
+
+    // keep_split requires that the weights are sorted by split index
+    if (params->keep_split) {
+        std::sort(tensors.begin(), tensors.end(), [](const llama_model_loader::llama_tensor_weight * a, const llama_model_loader::llama_tensor_weight * b) {
+            if (a->idx == b->idx) {
+                return a->offs < b->offs;
+            }
+            return a->idx < b->idx;
+        });
+    }
+
+    for (const auto * it : tensors) {
+        const struct ggml_tensor * tensor = it->tensor;
+
+        const std::string name = ggml_get_name(tensor);
+
+        // TODO: avoid hardcoded tensor names - use the TN_* constants
+        if (name.find("attn_v.weight")   != std::string::npos ||
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight")!= std::string::npos) {
+            ++qs.n_attention_wv;
+        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
+            qs.has_output = true;
+        }
+    }
+
+    qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
+
+    // sanity checks
+    {
+        const auto & n_head_kv_iter = model.hparams.n_head_kv_arr.begin();
+        // attention layers have a non-zero number of kv heads
+        int32_t n_attn_layer = model.hparams.n_layer - std::count(n_head_kv_iter, n_head_kv_iter + model.hparams.n_layer, 0);
+        if (llama_model_has_encoder(&model)) {
+            n_attn_layer *= 3;
+        }
+        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+    }
+
+    size_t total_size_org = 0;
+    size_t total_size_new = 0;
+
+    std::vector<std::thread> workers;
+    workers.reserve(nthread);
+
+    int idx = 0;
+
+    std::vector<no_init<uint8_t>> read_data;
+    std::vector<no_init<uint8_t>> work;
+    std::vector<no_init<float>> f32_conv_buf;
+
+    uint16_t n_split = 1;
+
+    // Assume split index is continuous
+    if (params->keep_split) {
+        for (const auto * it : tensors) {
+            n_split = std::max(uint16_t(it->idx + 1), n_split);
+        }
+    }
+    std::vector<gguf_context_ptr> ctx_outs(n_split);
+    ctx_outs[0] = std::move(ctx_out);
+
+    // populate the original tensors so we get an initial meta data
+    for (const auto * it : tensors) {
+        uint16_t i_split = params->keep_split ? it->idx : 0;
+        struct ggml_tensor * tensor = it->tensor;
+        if (!ctx_outs[i_split]) {
+            ctx_outs[i_split].reset(gguf_init_empty());
+        }
+        gguf_add_tensor(ctx_outs[i_split].get(), tensor);
+    }
+
+    // Set split info if needed
+    if (n_split > 1) {
+        for (size_t i = 0; i < ctx_outs.size(); ++i) {
+            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
+            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
+            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
+        }
+    }
+
+    int cur_split = -1;
+    std::ofstream fout;
+    auto close_ofstream = [&]() {
+        // Write metadata and close file handler
+        if (fout.is_open()) {
+            fout.seekp(0);
+            std::vector<uint8_t> data(gguf_get_meta_size(ctx_outs[cur_split].get()));
+            gguf_get_meta_data(ctx_outs[cur_split].get(), data.data());
+            fout.write((const char *) data.data(), data.size());
+            fout.close();
+        }
+    };
+    auto new_ofstream = [&](int index) {
+        cur_split = index;
+        GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
+        std::string fname = fname_out;
+        if (params->keep_split) {
+            std::vector<char> split_path(llama_path_max(), 0);
+            llama_split_path(split_path.data(), split_path.size(), fname_out.c_str(), cur_split, n_split);
+            fname = std::string(split_path.data());
+        }
+
+        fout = std::ofstream(fname, std::ios::binary);
+        fout.exceptions(std::ofstream::failbit); // fail fast on write errors
+        const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split].get());
+        // placeholder for the meta data
+        ::zeros(fout, meta_size);
+    };
+
+    const auto tn = LLM_TN(model.arch);
+    new_ofstream(0);
+    for (const auto * it : tensors) {
+        const auto & weight = *it;
+        struct ggml_tensor * tensor = weight.tensor;
+        if (weight.idx != cur_split && params->keep_split) {
+            close_ofstream();
+            new_ofstream(weight.idx);
+        }
+
+        const std::string name = ggml_get_name(tensor);
+
+        if (!ml.use_mmap) {
+            if (read_data.size() < ggml_nbytes(tensor)) {
+                read_data.resize(ggml_nbytes(tensor));
+            }
+            tensor->data = read_data.data();
+        }
+        ml.load_data_for(tensor);
+
+        LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
+               ++idx, ml.n_tensors,
+               ggml_get_name(tensor),
+               llama_format_tensor_shape(tensor).c_str(),
+               ggml_type_name(tensor->type));
+
+        // This used to be a regex, but <regex> has an extreme cost to compile times.
+        bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
+
+        // quantize only 2D and 3D tensors (experts)
+        quantize &= (ggml_n_dims(tensor) >= 2);
+
+        // do not quantize norm tensors
+        quantize &= name.find("_norm.weight") == std::string::npos;
+
+        quantize &= params->quantize_output_tensor || name != "output.weight";
+        quantize &= !params->only_copy;
+
+        // do not quantize expert gating tensors
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
+
+        // do not quantize positional embeddings and token types (BERT)
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
+
+        // do not quantize Mamba's small yet 2D weights
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
+
+        // do not quantize RWKV's time_mix_first tensors
+        quantize &= name.find("time_mix_first.weight") == std::string::npos;
+        quantize &= name.find("time_mix_w1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_w2.weight") == std::string::npos;
+        quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos;
+
+        // do not quantize relative position bias (T5)
+        quantize &= name.find("attn_rel_b.weight") == std::string::npos;
+
+        enum ggml_type new_type;
+        void * new_data;
+        size_t new_size;
+
+        if (quantize) {
+            new_type = default_type;
+
+            // get more optimal quantization type based on the tensor shape, layer, etc.
+            if (!params->pure && ggml_is_quantized(default_type)) {
+                new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
+            }
+            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
+                new_type = params->token_embedding_type;
+            }
+            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
+                new_type = params->output_tensor_type;
+            }
+
+            // If we've decided to quantize to the same type the tensor is already
+            // in then there's nothing to do.
+            quantize = tensor->type != new_type;
+        }
+
+        if (!quantize) {
+            new_type = tensor->type;
+            new_data = tensor->data;
+            new_size = ggml_nbytes(tensor);
+            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
+        } else {
+            const int64_t nelements = ggml_nelements(tensor);
+
+            const float * imatrix = nullptr;
+            if (imatrix_data) {
+                auto it = imatrix_data->find(tensor->name);
+                if (it == imatrix_data->end()) {
+                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
+                } else {
+                    if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
+                        imatrix = it->second.data();
+                    } else {
+                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
+                                int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
+
+                        // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
+                        // this is a significant error and it may be a good idea to abort the process if this happens,
+                        // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
+                        // tok_embd should be ignored in this case, since it always causes this warning
+                        if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
+                            throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
+                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
+                        }
+                    }
+                }
+            }
+            if ((new_type == GGML_TYPE_IQ2_XXS ||
+                 new_type == GGML_TYPE_IQ2_XS  ||
+                 new_type == GGML_TYPE_IQ2_S   ||
+                 new_type == GGML_TYPE_IQ1_S   ||
+                (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight"))  ||
+                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
+                LLAMA_LOG_ERROR("\n\n============================================================\n");
+                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
+                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
+                LLAMA_LOG_ERROR("============================================================\n\n");
+                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
+            }
+
+            float * f32_data;
+
+            if (tensor->type == GGML_TYPE_F32) {
+                f32_data = (float *) tensor->data;
+            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
+                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
+            } else {
+                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+                f32_data = (float *) f32_conv_buf.data();
+            }
+
+            LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
+            fflush(stdout);
+
+            if (work.size() < (size_t)nelements * 4) {
+                work.resize(nelements * 4); // upper bound on size
+            }
+            new_data = work.data();
+
+            const int64_t n_per_row = tensor->ne[0];
+            const int64_t nrows = tensor->ne[1];
+
+            static const int64_t min_chunk_size = 32 * 512;
+            const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row));
+
+            const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
+            const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
+            const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
+
+            // quantize each expert separately since they have different importance matrices
+            new_size = 0;
+            for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
+                const float * f32_data_03 = f32_data + i03 * nelements_matrix;
+                void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
+                const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
+
+                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
+            }
+            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
+        }
+        total_size_org += ggml_nbytes(tensor);
+        total_size_new += new_size;
+
+        // update the gguf meta data as we go
+        gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
+        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data, new_size);
+
+        // write tensor data + padding
+        fout.write((const char *) new_data, new_size);
+        zeros(fout, GGML_PAD(new_size, align) - new_size);
+    }
+    close_ofstream();
+
+    LLAMA_LOG_INFO("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+
+    if (qs.n_fallback > 0) {
+        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
+                __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
+    }
+}
+
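new_ofstream/close_ofstream above rely on a simple file-layout trick: write a zeroed placeholder where the GGUF metadata will go, stream the tensor data after it, then seek back and overwrite the placeholder once every tensor's final type and size is known. A self-contained sketch of that trick (std::ofstream only, no GGUF; sizes and contents are illustrative):

#include <fstream>
#include <string>
#include <vector>

int main() {
    const size_t meta_size = 128; // stand-in for gguf_get_meta_size()

    std::ofstream fout("out.bin", std::ios::binary);
    fout.exceptions(std::ofstream::failbit);            // fail fast on write errors

    std::vector<char> placeholder(meta_size, 0);
    fout.write(placeholder.data(), placeholder.size()); // reserve space for the metadata

    const std::string payload = "tensor data ...";
    fout.write(payload.data(), payload.size());         // stream the (quantized) tensors

    std::vector<char> meta(meta_size, 'M');             // stand-in for gguf_get_meta_data()
    fout.seekp(0);                                      // jump back to the start
    fout.write(meta.data(), meta.size());               // patch in the finalized metadata
    fout.close();
    return 0;
}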
+//
+// interface implementation
+//
+
+struct llama_model_quantize_params llama_model_quantize_default_params() {
+    struct llama_model_quantize_params result = {
+        /*.nthread                     =*/ 0,
+        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
+        /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
+        /*.token_embedding_type        =*/ GGML_TYPE_COUNT,
+        /*.allow_requantize            =*/ false,
+        /*.quantize_output_tensor      =*/ true,
+        /*.only_copy                   =*/ false,
+        /*.pure                        =*/ false,
+        /*.keep_split                  =*/ false,
+        /*.imatrix                     =*/ nullptr,
+        /*.kv_overrides                =*/ nullptr,
+    };
+
+    return result;
+}
+
+uint32_t llama_model_quantize(
+        const char * fname_inp,
+        const char * fname_out,
+        const llama_model_quantize_params * params) {
+    try {
+        llama_model_quantize_internal(fname_inp, fname_out, params);
+    } catch (const std::exception & err) {
+        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
+        return 1;
+    }
+
+    return 0;
+}
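A short usage sketch of the public entry point above (file names and thread count are placeholders):

#include "llama.h"

#include <cstdio>

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M; // target file type
    params.nthread = 8;                         // <= 0 falls back to hardware_concurrency()

    if (llama_model_quantize("model-f16.gguf", "model-Q4_K_M.gguf", &params) != 0) {
        fprintf(stderr, "quantization failed\n");
        return 1;
    }
    return 0;
}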
diff --git a/src/llama-quant.h b/src/llama-quant.h
new file mode 100644
index 000000000..6f70f09be
--- /dev/null
+++ b/src/llama-quant.h
@@ -0,0 +1 @@
+#pragma once
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index bebff77cf..69cea2f14 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1,5 +1,6 @@
 #include "llama-sampling.h"
 
+#include "llama-impl.h"
 #include "llama-vocab.h"
 #include "llama-grammar.h"
 
@@ -14,6 +15,118 @@
 #include 
 #include 
 #include 
+#include 
+
+// the ring buffer works similarly to std::deque, but with a fixed capacity
+template<typename T>
+struct ring_buffer {
+    ring_buffer(size_t cap) : capacity(cap), data(cap) {}
+
+    T & front() {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[first];
+    }
+
+    const T & front() const {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[first];
+    }
+
+    T & back() {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[pos];
+    }
+
+    const T & back() const {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[pos];
+    }
+
+    void push_back(const T & value) {
+        if (capacity == 0) {
+            throw std::runtime_error("ring buffer: capacity is zero");
+        }
+
+        if (sz == capacity) {
+            // advance the start when buffer is full
+            first = (first + 1) % capacity;
+        } else {
+            sz++;
+        }
+        data[pos] = value;
+        pos = (pos + 1) % capacity;
+    }
+
+    T pop_front() {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        T value = data[first];
+        first = (first + 1) % capacity;
+        sz--;
+        return value;
+    }
+
+    //T & operator[](size_t i) {
+    //    if (i >= sz) {
+    //        throw std::runtime_error("ring buffer: index out of bounds");
+    //    }
+    //    return data[(first + i) % capacity];
+    //}
+
+    //const T & at(size_t i) const {
+    //    if (i >= sz) {
+    //        throw std::runtime_error("ring buffer: index out of bounds");
+    //    }
+    //    return data[(first + i) % capacity];
+    //}
+
+    const T & rat(size_t i) const {
+        if (i >= sz) {
+            throw std::runtime_error("ring buffer: index out of bounds");
+        }
+        return data[(first + sz - i - 1) % capacity];
+    }
+
+    std::vector<T> to_vector() const {
+        std::vector<T> result;
+        result.reserve(sz);
+        for (size_t i = 0; i < sz; i++) {
+            result.push_back(data[(first + i) % capacity]);
+        }
+        return result;
+    }
+
+    void clear() {
+        // only reset the bookkeeping (size and positions); the stored elements are not cleared
+        sz = 0;
+        first = 0;
+        pos = 0;
+    }
+
+    bool empty() const {
+        return sz == 0;
+    }
+
+    size_t size() const {
+        return sz;
+    }
+
+    size_t capacity = 0;
+    size_t sz = 0;
+    size_t first = 0;
+    size_t pos = 0;
+
+    std::vector<T> data;
+};
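A short usage sketch for the fixed-capacity ring_buffer above (assumes the struct definition is in scope): once full, push_back evicts the oldest element, front() returns the oldest surviving one, and rat(i) indexes backwards from the most recently pushed value.

#include <cstdio>

int main() {
    ring_buffer<int> rb(3);
    for (int v = 1; v <= 5; ++v) {
        rb.push_back(v);                     // 1 and 2 get evicted; buffer now holds {3, 4, 5}
    }
    printf("front  = %d\n", rb.front());     // 3 (oldest surviving element)
    printf("rat(0) = %d\n", rb.rat(0));      // 5 (most recently pushed)
    printf("popped = %d\n", rb.pop_front()); // 3, size drops to 2
    return 0;
}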
 
 static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & rng) {
     // iterator for the probabilities
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 0a477d6dd..909e04871 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1,5 +1,7 @@
 #include "llama-vocab.h"
 
+#include "llama-impl.h"
+
 #include "unicode.h"
 
 #include 
@@ -16,22 +18,6 @@
 // helpers
 //
 
-LLAMA_ATTRIBUTE_FORMAT(1, 2)
-static std::string format(const char * fmt, ...) {
-    va_list ap;
-    va_list ap2;
-    va_start(ap, fmt);
-    va_copy(ap2, ap);
-    int size = vsnprintf(NULL, 0, fmt, ap);
-    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
-    std::vector buf(size + 1);
-    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
-    GGML_ASSERT(size2 == size);
-    va_end(ap2);
-    va_end(ap);
-    return std::string(buf.data(), size);
-}
-
 struct naive_trie {
     naive_trie() : has_value(false), value(0) {
     }
diff --git a/src/llama-vocab.h b/src/llama-vocab.h
index a9b0da5ef..0d00086da 100644
--- a/src/llama-vocab.h
+++ b/src/llama-vocab.h
@@ -1,6 +1,6 @@
 #pragma once
 
-#include "llama-impl.h"
+#include "llama.h"
 
 #include 
 #include 
@@ -8,6 +8,18 @@
 #include 
 #include 
 
+static const char * llama_model_vocab_type_name(enum llama_vocab_type type) {
+    switch (type) {
+        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
+        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
+        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
+        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
+        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
+        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
+        default:                    return "unknown";
+    }
+}
+
 struct llm_tokenizer;
 
 struct llama_vocab {
diff --git a/src/llama.cpp b/src/llama.cpp
index 4d41602fe..d7110b90b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1,48 +1,20 @@
 #include "llama-impl.h"
+
+#include "llama-chat.h"
+#include "llama-mmap.h"
+#include "llama-context.h"
 #include "llama-vocab.h"
 #include "llama-sampling.h"
-
-#include "unicode.h"
+#include "llama-kv-cache.h"
+#include "llama-model-loader.h"
+#include "llama-model.h"
+#include "llama-quant.h"
 
 #include "ggml.h"
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
 #include "ggml-cpp.h"
 
-// TODO: replace with ggml API call
-#define QK_K 256
-
-#ifdef __has_include
-    #if __has_include()
-        #include 
-        #if defined(_POSIX_MAPPED_FILES)
-            #include 
-            #include 
-        #endif
-        #if defined(_POSIX_MEMLOCK_RANGE)
-            #include 
-        #endif
-    #endif
-#endif
-
-#if defined(_WIN32)
-    #define WIN32_LEAN_AND_MEAN
-    #ifndef NOMINMAX
-        #define NOMINMAX
-    #endif
-    #include 
-    #ifndef PATH_MAX
-        #define PATH_MAX MAX_PATH
-    #endif
-    #include 
-#endif
-
-#if __cplusplus >= 202000L
-    #define LU8(x) (const char*)(u8##x)
-#else
-    #define LU8(x) u8##x
-#endif
-
 #include 
 #include 
 #include 
@@ -57,7416 +29,25 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
 #include 
 #include 
 #include 
-#include 
-#include 
 #include 
-#include 
-#include 
-#include 
 #include 
-#include 
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-// bump if necessary
-#define LLAMA_MAX_LAYERS  512
-#define LLAMA_MAX_EXPERTS 160  // DeepSeekV2
-
 //
-// helpers
+// tensor loading (TODO: add llama_tensor_loader?)
 //
 
-// trim whitespace from the beginning and end of a string
-static std::string trim(const std::string & str) {
-    size_t start = 0;
-    size_t end = str.size();
-    while (start < end && isspace(str[start])) {
-        start += 1;
-    }
-    while (end > start && isspace(str[end - 1])) {
-        end -= 1;
-    }
-    return str.substr(start, end - start);
-}
-
-static bool is_float_close(float a, float b, float abs_tol) {
-    // Check for non-negative tolerance
-    if (abs_tol < 0.0) {
-        throw std::invalid_argument("Tolerance must be non-negative");
-    }
-
-    // Exact equality check
-    if (a == b) {
-        return true;
-    }
-
-    // Check for infinities
-    if (std::isinf(a) || std::isinf(b)) {
-        return false;
-    }
-
-    // Regular comparison using the provided absolute tolerance
-    return std::fabs(b - a) <= abs_tol;
-}
-
-static void zeros(std::ofstream & file, size_t n) {
-    char zero = 0;
-    for (size_t i = 0; i < n; ++i) {
-        file.write(&zero, 1);
-    }
-}
-
-LLAMA_ATTRIBUTE_FORMAT(1, 2)
-static std::string format(const char * fmt, ...) {
-    va_list ap;
-    va_list ap2;
-    va_start(ap, fmt);
-    va_copy(ap2, ap);
-    int size = vsnprintf(NULL, 0, fmt, ap);
-    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
-    std::vector buf(size + 1);
-    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
-    GGML_ASSERT(size2 == size);
-    va_end(ap2);
-    va_end(ap);
-    return std::string(buf.data(), size);
-}
-
-//
-// gguf constants (sync with gguf.py)
-//
-
-enum llm_arch {
-    LLM_ARCH_LLAMA,
-    LLM_ARCH_DECI,
-    LLM_ARCH_FALCON,
-    LLM_ARCH_BAICHUAN,
-    LLM_ARCH_GROK,
-    LLM_ARCH_GPT2,
-    LLM_ARCH_GPTJ,
-    LLM_ARCH_GPTNEOX,
-    LLM_ARCH_MPT,
-    LLM_ARCH_STARCODER,
-    LLM_ARCH_REFACT,
-    LLM_ARCH_BERT,
-    LLM_ARCH_NOMIC_BERT,
-    LLM_ARCH_JINA_BERT_V2,
-    LLM_ARCH_BLOOM,
-    LLM_ARCH_STABLELM,
-    LLM_ARCH_QWEN,
-    LLM_ARCH_QWEN2,
-    LLM_ARCH_QWEN2MOE,
-    LLM_ARCH_QWEN2VL,
-    LLM_ARCH_PHI2,
-    LLM_ARCH_PHI3,
-    LLM_ARCH_PLAMO,
-    LLM_ARCH_CODESHELL,
-    LLM_ARCH_ORION,
-    LLM_ARCH_INTERNLM2,
-    LLM_ARCH_MINICPM,
-    LLM_ARCH_MINICPM3,
-    LLM_ARCH_GEMMA,
-    LLM_ARCH_GEMMA2,
-    LLM_ARCH_STARCODER2,
-    LLM_ARCH_MAMBA,
-    LLM_ARCH_XVERSE,
-    LLM_ARCH_COMMAND_R,
-    LLM_ARCH_DBRX,
-    LLM_ARCH_OLMO,
-    LLM_ARCH_OLMO2,
-    LLM_ARCH_OLMOE,
-    LLM_ARCH_OPENELM,
-    LLM_ARCH_ARCTIC,
-    LLM_ARCH_DEEPSEEK,
-    LLM_ARCH_DEEPSEEK2,
-    LLM_ARCH_CHATGLM,
-    LLM_ARCH_BITNET,
-    LLM_ARCH_T5,
-    LLM_ARCH_T5ENCODER,
-    LLM_ARCH_JAIS,
-    LLM_ARCH_NEMOTRON,
-    LLM_ARCH_EXAONE,
-    LLM_ARCH_RWKV6,
-    LLM_ARCH_GRANITE,
-    LLM_ARCH_GRANITE_MOE,
-    LLM_ARCH_CHAMELEON,
-    LLM_ARCH_WAVTOKENIZER_DEC,
-    LLM_ARCH_UNKNOWN,
-};
-
-static const std::map LLM_ARCH_NAMES = {
-    { LLM_ARCH_LLAMA,            "llama"            },
-    { LLM_ARCH_DECI,             "deci"            },
-    { LLM_ARCH_FALCON,           "falcon"           },
-    { LLM_ARCH_GROK,             "grok"             },
-    { LLM_ARCH_GPT2,             "gpt2"             },
-    { LLM_ARCH_GPTJ,             "gptj"             },
-    { LLM_ARCH_GPTNEOX,          "gptneox"          },
-    { LLM_ARCH_MPT,              "mpt"              },
-    { LLM_ARCH_BAICHUAN,         "baichuan"         },
-    { LLM_ARCH_STARCODER,        "starcoder"        },
-    { LLM_ARCH_REFACT,           "refact"           },
-    { LLM_ARCH_BERT,             "bert"             },
-    { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
-    { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
-    { LLM_ARCH_BLOOM,            "bloom"            },
-    { LLM_ARCH_STABLELM,         "stablelm"         },
-    { LLM_ARCH_QWEN,             "qwen"             },
-    { LLM_ARCH_QWEN2,            "qwen2"            },
-    { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
-    { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
-    { LLM_ARCH_PHI2,             "phi2"             },
-    { LLM_ARCH_PHI3,             "phi3"             },
-    { LLM_ARCH_PLAMO,            "plamo"            },
-    { LLM_ARCH_CODESHELL,        "codeshell"        },
-    { LLM_ARCH_ORION,            "orion"            },
-    { LLM_ARCH_INTERNLM2,        "internlm2"        },
-    { LLM_ARCH_MINICPM,          "minicpm"          },
-    { LLM_ARCH_MINICPM3,         "minicpm3"         },
-    { LLM_ARCH_GEMMA,            "gemma"            },
-    { LLM_ARCH_GEMMA2,           "gemma2"           },
-    { LLM_ARCH_STARCODER2,       "starcoder2"       },
-    { LLM_ARCH_MAMBA,            "mamba"            },
-    { LLM_ARCH_XVERSE,           "xverse"           },
-    { LLM_ARCH_COMMAND_R,        "command-r"        },
-    { LLM_ARCH_DBRX,             "dbrx"             },
-    { LLM_ARCH_OLMO,             "olmo"             },
-    { LLM_ARCH_OLMO2,            "olmo2"            },
-    { LLM_ARCH_OLMOE,            "olmoe"            },
-    { LLM_ARCH_OPENELM,          "openelm"          },
-    { LLM_ARCH_ARCTIC,           "arctic"           },
-    { LLM_ARCH_DEEPSEEK,         "deepseek"         },
-    { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
-    { LLM_ARCH_CHATGLM,          "chatglm"          },
-    { LLM_ARCH_BITNET,           "bitnet"           },
-    { LLM_ARCH_T5,               "t5"               },
-    { LLM_ARCH_T5ENCODER,        "t5encoder"        },
-    { LLM_ARCH_JAIS,             "jais"             },
-    { LLM_ARCH_NEMOTRON,         "nemotron"         },
-    { LLM_ARCH_EXAONE,           "exaone"           },
-    { LLM_ARCH_RWKV6,            "rwkv6"            },
-    { LLM_ARCH_GRANITE,          "granite"          },
-    { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
-    { LLM_ARCH_CHAMELEON,        "chameleon"        },
-    { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
-    { LLM_ARCH_UNKNOWN,          "(unknown)"        },
-};
-
-enum llm_kv {
-    LLM_KV_GENERAL_TYPE,
-    LLM_KV_GENERAL_ARCHITECTURE,
-    LLM_KV_GENERAL_QUANTIZATION_VERSION,
-    LLM_KV_GENERAL_ALIGNMENT,
-    LLM_KV_GENERAL_NAME,
-    LLM_KV_GENERAL_AUTHOR,
-    LLM_KV_GENERAL_VERSION,
-    LLM_KV_GENERAL_URL,
-    LLM_KV_GENERAL_DESCRIPTION,
-    LLM_KV_GENERAL_LICENSE,
-    LLM_KV_GENERAL_SOURCE_URL,
-    LLM_KV_GENERAL_SOURCE_HF_REPO,
-
-    LLM_KV_VOCAB_SIZE,
-    LLM_KV_CONTEXT_LENGTH,
-    LLM_KV_EMBEDDING_LENGTH,
-    LLM_KV_FEATURES_LENGTH,
-    LLM_KV_BLOCK_COUNT,
-    LLM_KV_LEADING_DENSE_BLOCK_COUNT,
-    LLM_KV_FEED_FORWARD_LENGTH,
-    LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
-    LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH,
-    LLM_KV_USE_PARALLEL_RESIDUAL,
-    LLM_KV_TENSOR_DATA_LAYOUT,
-    LLM_KV_EXPERT_COUNT,
-    LLM_KV_EXPERT_USED_COUNT,
-    LLM_KV_EXPERT_SHARED_COUNT,
-    LLM_KV_EXPERT_WEIGHTS_SCALE,
-    LLM_KV_POOLING_TYPE,
-    LLM_KV_LOGIT_SCALE,
-    LLM_KV_DECODER_START_TOKEN_ID,
-    LLM_KV_ATTN_LOGIT_SOFTCAPPING,
-    LLM_KV_FINAL_LOGIT_SOFTCAPPING,
-    LLM_KV_SWIN_NORM,
-    LLM_KV_RESCALE_EVERY_N_LAYERS,
-    LLM_KV_TIME_MIX_EXTRA_DIM,
-    LLM_KV_TIME_DECAY_EXTRA_DIM,
-    LLM_KV_RESIDUAL_SCALE,
-    LLM_KV_EMBEDDING_SCALE,
-
-    LLM_KV_ATTENTION_HEAD_COUNT,
-    LLM_KV_ATTENTION_HEAD_COUNT_KV,
-    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
-    LLM_KV_ATTENTION_CLAMP_KQV,
-    LLM_KV_ATTENTION_KEY_LENGTH,
-    LLM_KV_ATTENTION_VALUE_LENGTH,
-    LLM_KV_ATTENTION_LAYERNORM_EPS,
-    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
-    LLM_KV_ATTENTION_GROUPNORM_EPS,
-    LLM_KV_ATTENTION_GROUPNORM_GROUPS,
-    LLM_KV_ATTENTION_CAUSAL,
-    LLM_KV_ATTENTION_Q_LORA_RANK,
-    LLM_KV_ATTENTION_KV_LORA_RANK,
-    LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
-    LLM_KV_ATTENTION_SLIDING_WINDOW,
-    LLM_KV_ATTENTION_SCALE,
-
-    LLM_KV_ROPE_DIMENSION_COUNT,
-    LLM_KV_ROPE_DIMENSION_SECTIONS,
-    LLM_KV_ROPE_FREQ_BASE,
-    LLM_KV_ROPE_SCALE_LINEAR,
-    LLM_KV_ROPE_SCALING_TYPE,
-    LLM_KV_ROPE_SCALING_FACTOR,
-    LLM_KV_ROPE_SCALING_ATTN_FACTOR,
-    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
-    LLM_KV_ROPE_SCALING_FINETUNED,
-    LLM_KV_ROPE_SCALING_YARN_LOG_MUL,
-
-    LLM_KV_SPLIT_NO,
-    LLM_KV_SPLIT_COUNT,
-    LLM_KV_SPLIT_TENSORS_COUNT,
-
-    LLM_KV_SSM_INNER_SIZE,
-    LLM_KV_SSM_CONV_KERNEL,
-    LLM_KV_SSM_STATE_SIZE,
-    LLM_KV_SSM_TIME_STEP_RANK,
-    LLM_KV_SSM_DT_B_C_RMS,
-
-    LLM_KV_WKV_HEAD_SIZE,
-
-    LLM_KV_TOKENIZER_MODEL,
-    LLM_KV_TOKENIZER_PRE,
-    LLM_KV_TOKENIZER_LIST,
-    LLM_KV_TOKENIZER_TOKEN_TYPE,
-    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
-    LLM_KV_TOKENIZER_SCORES,
-    LLM_KV_TOKENIZER_MERGES,
-    LLM_KV_TOKENIZER_BOS_ID,
-    LLM_KV_TOKENIZER_EOS_ID,
-    LLM_KV_TOKENIZER_EOT_ID,
-    LLM_KV_TOKENIZER_EOM_ID,
-    LLM_KV_TOKENIZER_UNK_ID,
-    LLM_KV_TOKENIZER_SEP_ID,
-    LLM_KV_TOKENIZER_PAD_ID,
-    LLM_KV_TOKENIZER_CLS_ID,
-    LLM_KV_TOKENIZER_MASK_ID,
-    LLM_KV_TOKENIZER_ADD_BOS,
-    LLM_KV_TOKENIZER_ADD_EOS,
-    LLM_KV_TOKENIZER_ADD_PREFIX,
-    LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
-    LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
-    LLM_KV_TOKENIZER_HF_JSON,
-    LLM_KV_TOKENIZER_RWKV,
-    LLM_KV_TOKENIZER_FIM_PRE_ID,
-    LLM_KV_TOKENIZER_FIM_SUF_ID,
-    LLM_KV_TOKENIZER_FIM_MID_ID,
-    LLM_KV_TOKENIZER_FIM_PAD_ID,
-    LLM_KV_TOKENIZER_FIM_REP_ID,
-    LLM_KV_TOKENIZER_FIM_SEP_ID,
-
-    LLM_KV_ADAPTER_TYPE,
-    LLM_KV_ADAPTER_LORA_ALPHA,
-
-    LLM_KV_POSNET_EMBEDDING_LENGTH,
-    LLM_KV_POSNET_BLOCK_COUNT,
-
-    LLM_KV_CONVNEXT_EMBEDDING_LENGTH,
-    LLM_KV_CONVNEXT_BLOCK_COUNT,
-
-    // deprecated:
-    LLM_KV_TOKENIZER_PREFIX_ID,
-    LLM_KV_TOKENIZER_SUFFIX_ID,
-    LLM_KV_TOKENIZER_MIDDLE_ID,
-};
-
-static const std::map LLM_KV_NAMES = {
-    { LLM_KV_GENERAL_TYPE,                  "general.type"                          },
-    { LLM_KV_GENERAL_ARCHITECTURE,          "general.architecture"                  },
-    { LLM_KV_GENERAL_QUANTIZATION_VERSION,  "general.quantization_version"          },
-    { LLM_KV_GENERAL_ALIGNMENT,             "general.alignment"                     },
-    { LLM_KV_GENERAL_NAME,                  "general.name"                          },
-    { LLM_KV_GENERAL_AUTHOR,                "general.author"                        },
-    { LLM_KV_GENERAL_VERSION,               "general.version"                       },
-    { LLM_KV_GENERAL_URL,                   "general.url"                           },
-    { LLM_KV_GENERAL_DESCRIPTION,           "general.description"                   },
-    { LLM_KV_GENERAL_LICENSE,               "general.license"                       },
-    { LLM_KV_GENERAL_SOURCE_URL,            "general.source.url"                    },
-    { LLM_KV_GENERAL_SOURCE_HF_REPO,        "general.source.huggingface.repository" },
-
-    { LLM_KV_VOCAB_SIZE,                        "%s.vocab_size"                        },
-    { LLM_KV_CONTEXT_LENGTH,                    "%s.context_length"                    },
-    { LLM_KV_EMBEDDING_LENGTH,                  "%s.embedding_length"                  },
-    { LLM_KV_FEATURES_LENGTH,                   "%s.features_length"                   },
-    { LLM_KV_BLOCK_COUNT,                       "%s.block_count"                       },
-    { LLM_KV_LEADING_DENSE_BLOCK_COUNT,         "%s.leading_dense_block_count"         },
-    { LLM_KV_FEED_FORWARD_LENGTH,               "%s.feed_forward_length"               },
-    { LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        "%s.expert_feed_forward_length"        },
-    { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
-    { LLM_KV_USE_PARALLEL_RESIDUAL,             "%s.use_parallel_residual"             },
-    { LLM_KV_TENSOR_DATA_LAYOUT,                "%s.tensor_data_layout"                },
-    { LLM_KV_EXPERT_COUNT,                      "%s.expert_count"                      },
-    { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
-    { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
-    { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
-    { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
-    { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
-    { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
-    { LLM_KV_ATTN_LOGIT_SOFTCAPPING,            "%s.attn_logit_softcapping"            },
-    { LLM_KV_FINAL_LOGIT_SOFTCAPPING,           "%s.final_logit_softcapping"           },
-    { LLM_KV_SWIN_NORM,                         "%s.swin_norm"                         },
-    { LLM_KV_RESCALE_EVERY_N_LAYERS,            "%s.rescale_every_n_layers"            },
-    { LLM_KV_TIME_MIX_EXTRA_DIM,                "%s.time_mix_extra_dim"                },
-    { LLM_KV_TIME_DECAY_EXTRA_DIM,              "%s.time_decay_extra_dim"              },
-    { LLM_KV_RESIDUAL_SCALE,                    "%s.residual_scale"                    },
-    { LLM_KV_EMBEDDING_SCALE,                   "%s.embedding_scale"                   },
-
-    { LLM_KV_ATTENTION_HEAD_COUNT,             "%s.attention.head_count"             },
-    { LLM_KV_ATTENTION_HEAD_COUNT_KV,          "%s.attention.head_count_kv"          },
-    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS,         "%s.attention.max_alibi_bias"         },
-    { LLM_KV_ATTENTION_CLAMP_KQV,              "%s.attention.clamp_kqv"              },
-    { LLM_KV_ATTENTION_KEY_LENGTH,             "%s.attention.key_length"             },
-    { LLM_KV_ATTENTION_VALUE_LENGTH,           "%s.attention.value_length"           },
-    { LLM_KV_ATTENTION_LAYERNORM_EPS,          "%s.attention.layer_norm_epsilon"     },
-    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,      "%s.attention.layer_norm_rms_epsilon" },
-    { LLM_KV_ATTENTION_GROUPNORM_EPS,          "%s.attention.group_norm_epsilon"     },
-    { LLM_KV_ATTENTION_GROUPNORM_GROUPS,       "%s.attention.group_norm_groups"      },
-    { LLM_KV_ATTENTION_CAUSAL,                 "%s.attention.causal"                 },
-    { LLM_KV_ATTENTION_Q_LORA_RANK,            "%s.attention.q_lora_rank"            },
-    { LLM_KV_ATTENTION_KV_LORA_RANK,           "%s.attention.kv_lora_rank"           },
-    { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
-    { LLM_KV_ATTENTION_SLIDING_WINDOW,         "%s.attention.sliding_window"         },
-    { LLM_KV_ATTENTION_SCALE,                  "%s.attention.scale"                  },
-
-    { LLM_KV_ROPE_DIMENSION_COUNT,             "%s.rope.dimension_count"                 },
-    { LLM_KV_ROPE_DIMENSION_SECTIONS,          "%s.rope.dimension_sections"              },
-    { LLM_KV_ROPE_FREQ_BASE,                   "%s.rope.freq_base"                       },
-    { LLM_KV_ROPE_SCALE_LINEAR,                "%s.rope.scale_linear"                    },
-    { LLM_KV_ROPE_SCALING_TYPE,                "%s.rope.scaling.type"                    },
-    { LLM_KV_ROPE_SCALING_FACTOR,              "%s.rope.scaling.factor"                  },
-    { LLM_KV_ROPE_SCALING_ATTN_FACTOR,         "%s.rope.scaling.attn_factor"             },
-    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,        "%s.rope.scaling.original_context_length" },
-    { LLM_KV_ROPE_SCALING_FINETUNED,           "%s.rope.scaling.finetuned"               },
-    { LLM_KV_ROPE_SCALING_YARN_LOG_MUL,        "%s.rope.scaling.yarn_log_multiplier"     },
-
-    { LLM_KV_SPLIT_NO,                         "split.no"            },
-    { LLM_KV_SPLIT_COUNT,                      "split.count"         },
-    { LLM_KV_SPLIT_TENSORS_COUNT,              "split.tensors.count" },
-
-    { LLM_KV_SSM_CONV_KERNEL,                  "%s.ssm.conv_kernel"    },
-    { LLM_KV_SSM_INNER_SIZE,                   "%s.ssm.inner_size"     },
-    { LLM_KV_SSM_STATE_SIZE,                   "%s.ssm.state_size"     },
-    { LLM_KV_SSM_TIME_STEP_RANK,               "%s.ssm.time_step_rank" },
-    { LLM_KV_SSM_DT_B_C_RMS,                   "%s.ssm.dt_b_c_rms"     },
-
-    { LLM_KV_WKV_HEAD_SIZE,                    "%s.wkv.head_size" },
-
-    { LLM_KV_POSNET_EMBEDDING_LENGTH,          "%s.posnet.embedding_length" },
-    { LLM_KV_POSNET_BLOCK_COUNT,               "%s.posnet.block_count"      },
-
-    { LLM_KV_CONVNEXT_EMBEDDING_LENGTH,        "%s.convnext.embedding_length" },
-    { LLM_KV_CONVNEXT_BLOCK_COUNT,             "%s.convnext.block_count"      },
-
-    { LLM_KV_TOKENIZER_MODEL,                  "tokenizer.ggml.model"                    },
-    { LLM_KV_TOKENIZER_PRE,                    "tokenizer.ggml.pre"                      },
-    { LLM_KV_TOKENIZER_LIST,                   "tokenizer.ggml.tokens"                   },
-    { LLM_KV_TOKENIZER_TOKEN_TYPE,             "tokenizer.ggml.token_type"               },
-    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,       "tokenizer.ggml.token_type_count"         },
-    { LLM_KV_TOKENIZER_SCORES,                 "tokenizer.ggml.scores"                   },
-    { LLM_KV_TOKENIZER_MERGES,                 "tokenizer.ggml.merges"                   },
-    { LLM_KV_TOKENIZER_BOS_ID,                 "tokenizer.ggml.bos_token_id"             },
-    { LLM_KV_TOKENIZER_EOS_ID,                 "tokenizer.ggml.eos_token_id"             },
-    { LLM_KV_TOKENIZER_EOT_ID,                 "tokenizer.ggml.eot_token_id"             },
-    { LLM_KV_TOKENIZER_EOM_ID,                 "tokenizer.ggml.eom_token_id"             },
-    { LLM_KV_TOKENIZER_UNK_ID,                 "tokenizer.ggml.unknown_token_id"         },
-    { LLM_KV_TOKENIZER_SEP_ID,                 "tokenizer.ggml.seperator_token_id"       },
-    { LLM_KV_TOKENIZER_PAD_ID,                 "tokenizer.ggml.padding_token_id"         },
-    { LLM_KV_TOKENIZER_CLS_ID,                 "tokenizer.ggml.cls_token_id"             },
-    { LLM_KV_TOKENIZER_MASK_ID,                "tokenizer.ggml.mask_token_id"            },
-    { LLM_KV_TOKENIZER_ADD_BOS,                "tokenizer.ggml.add_bos_token"            },
-    { LLM_KV_TOKENIZER_ADD_EOS,                "tokenizer.ggml.add_eos_token"            },
-    { LLM_KV_TOKENIZER_ADD_PREFIX,             "tokenizer.ggml.add_space_prefix"         },
-    { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,        "tokenizer.ggml.remove_extra_whitespaces" },
-    { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,   "tokenizer.ggml.precompiled_charsmap"     },
-    { LLM_KV_TOKENIZER_HF_JSON,                "tokenizer.huggingface.json"              },
-    { LLM_KV_TOKENIZER_RWKV,                   "tokenizer.rwkv.world"                    },
-    { LLM_KV_TOKENIZER_FIM_PRE_ID,             "tokenizer.ggml.fim_pre_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_SUF_ID,             "tokenizer.ggml.fim_suf_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_MID_ID,             "tokenizer.ggml.fim_mid_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_PAD_ID,             "tokenizer.ggml.fim_pad_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_REP_ID,             "tokenizer.ggml.fim_rep_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_SEP_ID,             "tokenizer.ggml.fim_sep_token_id"         },
-
-    { LLM_KV_ADAPTER_TYPE,                     "adapter.type"       },
-    { LLM_KV_ADAPTER_LORA_ALPHA,               "adapter.lora.alpha" },
-
-    // deprecated
-    { LLM_KV_TOKENIZER_PREFIX_ID,              "tokenizer.ggml.prefix_token_id" },
-    { LLM_KV_TOKENIZER_SUFFIX_ID,              "tokenizer.ggml.suffix_token_id" },
-    { LLM_KV_TOKENIZER_MIDDLE_ID,              "tokenizer.ggml.middle_token_id" },
-};
-
-struct LLM_KV {
-    LLM_KV(llm_arch arch) : arch(arch) {}
-
-    llm_arch arch;
-
-    std::string operator()(llm_kv kv) const {
-        return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
-    }
-};
-
-enum llm_tensor {
-    LLM_TENSOR_TOKEN_EMBD,
-    LLM_TENSOR_TOKEN_EMBD_NORM,
-    LLM_TENSOR_TOKEN_TYPES,
-    LLM_TENSOR_POS_EMBD,
-    LLM_TENSOR_OUTPUT,
-    LLM_TENSOR_OUTPUT_NORM,
-    LLM_TENSOR_ROPE_FREQS,
-    LLM_TENSOR_ROPE_FACTORS_LONG,
-    LLM_TENSOR_ROPE_FACTORS_SHORT,
-    LLM_TENSOR_ATTN_Q,
-    LLM_TENSOR_ATTN_K,
-    LLM_TENSOR_ATTN_V,
-    LLM_TENSOR_ATTN_QKV,
-    LLM_TENSOR_ATTN_OUT,
-    LLM_TENSOR_ATTN_NORM,
-    LLM_TENSOR_ATTN_NORM_2,
-    LLM_TENSOR_ATTN_OUT_NORM,
-    LLM_TENSOR_ATTN_POST_NORM,
-    LLM_TENSOR_ATTN_ROT_EMBD,
-    LLM_TENSOR_FFN_GATE_INP,
-    LLM_TENSOR_FFN_GATE_INP_SHEXP,
-    LLM_TENSOR_FFN_NORM,
-    LLM_TENSOR_FFN_POST_NORM,
-    LLM_TENSOR_FFN_GATE,
-    LLM_TENSOR_FFN_DOWN,
-    LLM_TENSOR_FFN_UP,
-    LLM_TENSOR_FFN_ACT,
-    LLM_TENSOR_FFN_DOWN_EXP,  // split experts for backward compatibility
-    LLM_TENSOR_FFN_GATE_EXP,
-    LLM_TENSOR_FFN_UP_EXP,
-    LLM_TENSOR_FFN_NORM_EXPS,
-    LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
-    LLM_TENSOR_FFN_GATE_EXPS,
-    LLM_TENSOR_FFN_UP_EXPS,
-    LLM_TENSOR_FFN_DOWN_SHEXP,
-    LLM_TENSOR_FFN_GATE_SHEXP,
-    LLM_TENSOR_FFN_UP_SHEXP,
-    LLM_TENSOR_ATTN_Q_NORM,
-    LLM_TENSOR_ATTN_K_NORM,
-    LLM_TENSOR_LAYER_OUT_NORM,
-    LLM_TENSOR_SSM_IN,
-    LLM_TENSOR_SSM_CONV1D,
-    LLM_TENSOR_SSM_X,
-    LLM_TENSOR_SSM_DT,
-    LLM_TENSOR_SSM_A,
-    LLM_TENSOR_SSM_D,
-    LLM_TENSOR_SSM_OUT,
-    LLM_TENSOR_TIME_MIX_W1,
-    LLM_TENSOR_TIME_MIX_W2,
-    LLM_TENSOR_TIME_MIX_LERP_X,
-    LLM_TENSOR_TIME_MIX_LERP_W,
-    LLM_TENSOR_TIME_MIX_LERP_K,
-    LLM_TENSOR_TIME_MIX_LERP_V,
-    LLM_TENSOR_TIME_MIX_LERP_R,
-    LLM_TENSOR_TIME_MIX_LERP_G,
-    LLM_TENSOR_TIME_MIX_FIRST,
-    LLM_TENSOR_TIME_MIX_DECAY,
-    LLM_TENSOR_TIME_MIX_DECAY_W1,
-    LLM_TENSOR_TIME_MIX_DECAY_W2,
-    LLM_TENSOR_TIME_MIX_KEY,
-    LLM_TENSOR_TIME_MIX_VALUE,
-    LLM_TENSOR_TIME_MIX_RECEPTANCE,
-    LLM_TENSOR_TIME_MIX_GATE,
-    LLM_TENSOR_TIME_MIX_LN,
-    LLM_TENSOR_TIME_MIX_OUTPUT,
-    LLM_TENSOR_CHANNEL_MIX_LERP_K,
-    LLM_TENSOR_CHANNEL_MIX_LERP_R,
-    LLM_TENSOR_CHANNEL_MIX_KEY,
-    LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
-    LLM_TENSOR_CHANNEL_MIX_VALUE,
-    LLM_TENSOR_ATTN_Q_A,
-    LLM_TENSOR_ATTN_Q_B,
-    LLM_TENSOR_ATTN_KV_A_MQA,
-    LLM_TENSOR_ATTN_KV_B,
-    LLM_TENSOR_ATTN_Q_A_NORM,
-    LLM_TENSOR_ATTN_KV_A_NORM,
-    LLM_TENSOR_ATTN_SUB_NORM,
-    LLM_TENSOR_FFN_SUB_NORM,
-    LLM_TENSOR_DEC_ATTN_NORM,
-    LLM_TENSOR_DEC_ATTN_Q,
-    LLM_TENSOR_DEC_ATTN_K,
-    LLM_TENSOR_DEC_ATTN_V,
-    LLM_TENSOR_DEC_ATTN_OUT,
-    LLM_TENSOR_DEC_ATTN_REL_B,
-    LLM_TENSOR_DEC_CROSS_ATTN_NORM,
-    LLM_TENSOR_DEC_CROSS_ATTN_Q,
-    LLM_TENSOR_DEC_CROSS_ATTN_K,
-    LLM_TENSOR_DEC_CROSS_ATTN_V,
-    LLM_TENSOR_DEC_CROSS_ATTN_OUT,
-    LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
-    LLM_TENSOR_DEC_FFN_NORM,
-    LLM_TENSOR_DEC_FFN_GATE,
-    LLM_TENSOR_DEC_FFN_DOWN,
-    LLM_TENSOR_DEC_FFN_UP,
-    LLM_TENSOR_DEC_OUTPUT_NORM,
-    LLM_TENSOR_ENC_ATTN_NORM,
-    LLM_TENSOR_ENC_ATTN_Q,
-    LLM_TENSOR_ENC_ATTN_K,
-    LLM_TENSOR_ENC_ATTN_V,
-    LLM_TENSOR_ENC_ATTN_OUT,
-    LLM_TENSOR_ENC_ATTN_REL_B,
-    LLM_TENSOR_ENC_FFN_NORM,
-    LLM_TENSOR_ENC_FFN_GATE,
-    LLM_TENSOR_ENC_FFN_DOWN,
-    LLM_TENSOR_ENC_FFN_UP,
-    LLM_TENSOR_ENC_OUTPUT_NORM,
-    LLM_TENSOR_CLS,
-    LLM_TENSOR_CLS_OUT,
-    LLM_TENSOR_CONV1D,
-    LLM_TENSOR_CONVNEXT_DW,
-    LLM_TENSOR_CONVNEXT_NORM,
-    LLM_TENSOR_CONVNEXT_PW1,
-    LLM_TENSOR_CONVNEXT_PW2,
-    LLM_TENSOR_CONVNEXT_GAMMA,
-    LLM_TENSOR_POS_NET_CONV1,
-    LLM_TENSOR_POS_NET_CONV2,
-    LLM_TENSOR_POS_NET_NORM,
-    LLM_TENSOR_POS_NET_NORM1,
-    LLM_TENSOR_POS_NET_NORM2,
-    LLM_TENSOR_POS_NET_ATTN_NORM,
-    LLM_TENSOR_POS_NET_ATTN_Q,
-    LLM_TENSOR_POS_NET_ATTN_K,
-    LLM_TENSOR_POS_NET_ATTN_V,
-    LLM_TENSOR_POS_NET_ATTN_OUT,
-};
-
-static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
-    {
-        LLM_ARCH_LLAMA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_DECI,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_BAICHUAN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_FALCON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GROK,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-        },
-    },
-    {
-        LLM_ARCH_GPT2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GPTJ,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
-    {
-        LLM_ARCH_GPTNEOX,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MPT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output"},
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_ACT,         "blk.%d.ffn.act" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm"},
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm"},
-        },
-    },
-    {
-        LLM_ARCH_STARCODER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_REFACT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_CLS,             "cls" },
-            { LLM_TENSOR_CLS_OUT,         "cls.output" },
-        },
-    },
-    {
-        LLM_ARCH_NOMIC_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_JINA_BERT_V2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_CLS,             "cls" },
-        },
-    },
-    {
-        LLM_ARCH_BLOOM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_STABLELM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2VL,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_PHI2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PHI3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,           "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PLAMO,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_CODESHELL,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ORION,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_INTERNLM2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MINICPM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-        },
-    },
-    {
-        LLM_ARCH_MINICPM3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
-            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_STARCODER2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MAMBA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-        },
-    },
-    {
-        LLM_ARCH_XVERSE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_COMMAND_R,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_DBRX,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_OLMO,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OLMO2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OLMOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_OPENELM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ARCTIC,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_NORM_EXPS,   "blk.%d.ffn_norm_exps" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_DEEPSEEK,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_DEEPSEEK2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
-            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_CHATGLM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_BITNET,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_SUB_NORM,      "blk.%d.attn_sub_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_SUB_NORM,       "blk.%d.ffn_sub_norm" },
-        },
-    },
-    {
-        LLM_ARCH_T5,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT,               "output" },
-            { LLM_TENSOR_DEC_OUTPUT_NORM,      "dec.output_norm" },
-            { LLM_TENSOR_DEC_ATTN_NORM,        "dec.blk.%d.attn_norm" },
-            { LLM_TENSOR_DEC_ATTN_Q,           "dec.blk.%d.attn_q" },
-            { LLM_TENSOR_DEC_ATTN_K,           "dec.blk.%d.attn_k" },
-            { LLM_TENSOR_DEC_ATTN_V,           "dec.blk.%d.attn_v" },
-            { LLM_TENSOR_DEC_ATTN_OUT,         "dec.blk.%d.attn_o" },
-            { LLM_TENSOR_DEC_ATTN_REL_B,       "dec.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "dec.blk.%d.cross_attn_norm" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_Q,     "dec.blk.%d.cross_attn_q" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_K,     "dec.blk.%d.cross_attn_k" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_V,     "dec.blk.%d.cross_attn_v" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_OUT,   "dec.blk.%d.cross_attn_o" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
-            { LLM_TENSOR_DEC_FFN_NORM,         "dec.blk.%d.ffn_norm" },
-            { LLM_TENSOR_DEC_FFN_GATE,         "dec.blk.%d.ffn_gate" },
-            { LLM_TENSOR_DEC_FFN_DOWN,         "dec.blk.%d.ffn_down" },
-            { LLM_TENSOR_DEC_FFN_UP,           "dec.blk.%d.ffn_up" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
-            { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
-            { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
-            { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
-            { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
-            { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
-            { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
-            { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
-            { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
-            { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_T5ENCODER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT,               "output" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
-            { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
-            { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
-            { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
-            { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
-            { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
-            { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
-            { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
-            { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
-            { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_JAIS,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_NEMOTRON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_EXAONE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_RWKV6,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
-            { LLM_TENSOR_OUTPUT,                    "output" },
-            { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,               "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
-            { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
-            { LLM_TENSOR_TIME_MIX_LERP_X,           "blk.%d.time_mix_lerp_x" },
-            { LLM_TENSOR_TIME_MIX_LERP_W,           "blk.%d.time_mix_lerp_w" },
-            { LLM_TENSOR_TIME_MIX_LERP_K,           "blk.%d.time_mix_lerp_k" },
-            { LLM_TENSOR_TIME_MIX_LERP_V,           "blk.%d.time_mix_lerp_v" },
-            { LLM_TENSOR_TIME_MIX_LERP_R,           "blk.%d.time_mix_lerp_r" },
-            { LLM_TENSOR_TIME_MIX_LERP_G,           "blk.%d.time_mix_lerp_g" },
-            { LLM_TENSOR_TIME_MIX_FIRST,            "blk.%d.time_mix_first" },
-            { LLM_TENSOR_TIME_MIX_DECAY,            "blk.%d.time_mix_decay" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W1,         "blk.%d.time_mix_decay_w1" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W2,         "blk.%d.time_mix_decay_w2" },
-            { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
-            { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
-            { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
-            { LLM_TENSOR_TIME_MIX_GATE,             "blk.%d.time_mix_gate" },
-            { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
-            { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_K,        "blk.%d.channel_mix_lerp_k" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_R,        "blk.%d.channel_mix_lerp_r" },
-            { LLM_TENSOR_CHANNEL_MIX_KEY,           "blk.%d.channel_mix_key" },
-            { LLM_TENSOR_CHANNEL_MIX_VALUE,         "blk.%d.channel_mix_value" },
-            { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,    "blk.%d.channel_mix_receptance" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_CHAMELEON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_WAVTOKENIZER_DEC,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
-            { LLM_TENSOR_CONV1D,            "conv1d" },
-            { LLM_TENSOR_CONVNEXT_DW,       "convnext.%d.dw" },
-            { LLM_TENSOR_CONVNEXT_NORM,     "convnext.%d.norm" },
-            { LLM_TENSOR_CONVNEXT_PW1,      "convnext.%d.pw1" },
-            { LLM_TENSOR_CONVNEXT_PW2,      "convnext.%d.pw2" },
-            { LLM_TENSOR_CONVNEXT_GAMMA,    "convnext.%d.gamma" },
-            { LLM_TENSOR_OUTPUT_NORM,       "output_norm" },
-            { LLM_TENSOR_OUTPUT,            "output" },
-            { LLM_TENSOR_POS_NET_CONV1,     "posnet.%d.conv1" },
-            { LLM_TENSOR_POS_NET_CONV2,     "posnet.%d.conv2" },
-            { LLM_TENSOR_POS_NET_NORM,      "posnet.%d.norm" },
-            { LLM_TENSOR_POS_NET_NORM1,     "posnet.%d.norm1" },
-            { LLM_TENSOR_POS_NET_NORM2,     "posnet.%d.norm2" },
-            { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
-            { LLM_TENSOR_POS_NET_ATTN_Q,    "posnet.%d.attn_q" },
-            { LLM_TENSOR_POS_NET_ATTN_K,    "posnet.%d.attn_k" },
-            { LLM_TENSOR_POS_NET_ATTN_V,    "posnet.%d.attn_v" },
-            { LLM_TENSOR_POS_NET_ATTN_OUT,  "posnet.%d.attn_output" },
-        },
-    },
-    {
-        LLM_ARCH_UNKNOWN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
-};
-
-enum llm_chat_template {
-    LLM_CHAT_TEMPLATE_CHATML,
-    LLM_CHAT_TEMPLATE_LLAMA_2,
-    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
-    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
-    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
-    LLM_CHAT_TEMPLATE_MISTRAL_V1,
-    LLM_CHAT_TEMPLATE_MISTRAL_V3,
-    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
-    LLM_CHAT_TEMPLATE_MISTRAL_V7,
-    LLM_CHAT_TEMPLATE_PHI_3,
-    LLM_CHAT_TEMPLATE_FALCON_3,
-    LLM_CHAT_TEMPLATE_ZEPHYR,
-    LLM_CHAT_TEMPLATE_MONARCH,
-    LLM_CHAT_TEMPLATE_GEMMA,
-    LLM_CHAT_TEMPLATE_ORION,
-    LLM_CHAT_TEMPLATE_OPENCHAT,
-    LLM_CHAT_TEMPLATE_VICUNA,
-    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
-    LLM_CHAT_TEMPLATE_DEEPSEEK,
-    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
-    LLM_CHAT_TEMPLATE_COMMAND_R,
-    LLM_CHAT_TEMPLATE_LLAMA_3,
-    LLM_CHAT_TEMPLATE_CHATGML_3,
-    LLM_CHAT_TEMPLATE_CHATGML_4,
-    LLM_CHAT_TEMPLATE_MINICPM,
-    LLM_CHAT_TEMPLATE_EXAONE_3,
-    LLM_CHAT_TEMPLATE_RWKV_WORLD,
-    LLM_CHAT_TEMPLATE_GRANITE,
-    LLM_CHAT_TEMPLATE_GIGACHAT,
-    LLM_CHAT_TEMPLATE_MEGREZ,
-    LLM_CHAT_TEMPLATE_UNKNOWN,
-};
-
-static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
-    { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
-    { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
-    { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
-    { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
-    { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
-    { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
-    { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
-    { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
-    { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
-    { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
-    { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
-    { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
-    { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
-    { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
-    { "orion",             LLM_CHAT_TEMPLATE_ORION             },
-    { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
-    { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
-    { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
-    { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
-    { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
-    { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
-    { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
-    { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGML_3         },
-    { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGML_4         },
-    { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
-    { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
-    { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
-    { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
-    { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
-    { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
-};
-
-static llm_arch llm_arch_from_string(const std::string & name) {
-    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
-        if (kv.second == name) {
-            return kv.first;
-        }
-    }
-
-    return LLM_ARCH_UNKNOWN;
-}
-
-// helper to handle gguf constants
-// usage:
-//
-//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
-//
-//   std::string name = tn(LLM_TENSOR_OUTPUT);                     -> "output"
-//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");         -> "token_embd.bias"
-//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3);     -> "blk.3.attn_norm.weight"
-//
-struct LLM_TN_IMPL {
-    const llm_arch arch;
-    const llm_tensor tensor;
-    const char * const suffix;
-    const int bid;
-    const int xid;
-
-    std::string str() const {
-        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
-            return "__missing__";
-        }
-
-        std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid);
-
-        if (suffix != nullptr) {
-            name += ".";
-            name += suffix;
-        }
-
-        return name;
-    }
-
-    operator std::string() const {
-        return str();
-    }
-
-    friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) {
-        return str == tn.str();
-    }
-
-    friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) {
-        return str != tn.str();
-    }
-};
-
-struct LLM_TN {
-    LLM_TN(llm_arch arch) : arch(arch) {}
-
-    llm_arch arch;
-
-    LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const {
-        return { arch, tensor, suffix, bid, xid };
-    }
-
-    LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const {
-        return { arch, tensor, nullptr, bid, xid };
-    }
-};
-
-//
-// gguf helpers
-//
-
-static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
-    { LLAMA_ROPE_SCALING_TYPE_NONE,       "none"       },
-    { LLAMA_ROPE_SCALING_TYPE_LINEAR,     "linear"     },
-    { LLAMA_ROPE_SCALING_TYPE_YARN,       "yarn"       },
-    { LLAMA_ROPE_SCALING_TYPE_LONGROPE,   "longrope"   },
-};
-
-static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
-    for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
-        if (kv.second == name) {
-            return (llama_rope_scaling_type) kv.first;
-        }
-    }
-
-    return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
-}
-
-static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
-    switch (type) {
-        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
-        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
-        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
-        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
-        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
-        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
-        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
-        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
-        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
-        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
-        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
-        default:                return format("unknown type %d", type);
-    }
-}
-
-static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
-    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
-
-    switch (type) {
-        case GGUF_TYPE_STRING:
-            return gguf_get_val_str(ctx_gguf, i);
-        case GGUF_TYPE_ARRAY:
-            {
-                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
-                int arr_n = gguf_get_arr_n(ctx_gguf, i);
-                const void * data = gguf_get_arr_data(ctx_gguf, i);
-                std::stringstream ss;
-                ss << "[";
-                for (int j = 0; j < arr_n; j++) {
-                    if (arr_type == GGUF_TYPE_STRING) {
-                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
-                        // escape quotes
-                        replace_all(val, "\\", "\\\\");
-                        replace_all(val, "\"", "\\\"");
-                        ss << '"' << val << '"';
-                    } else if (arr_type == GGUF_TYPE_ARRAY) {
-                        ss << "???";
-                    } else {
-                        ss << gguf_data_to_str(arr_type, data, j);
-                    }
-                    if (j < arr_n - 1) {
-                        ss << ", ";
-                    }
-                }
-                ss << "]";
-                return ss.str();
-            }
-        default:
-            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
-    }
-}
-
-//
-// llama helpers
-//
-
-#if defined(_WIN32)
-static std::string llama_format_win_err(DWORD err) {
-    LPSTR buf;
-    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
-                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
-    if (!size) {
-        return "FormatMessageA failed";
-    }
-    std::string ret(buf, size);
-    LocalFree(buf);
-    return ret;
-}
-#endif
-
-template <typename T>
-struct no_init {
-    T value;
-    no_init() { /* do nothing */ }
-};
-
-struct llama_file {
-
-#if defined(_WIN32)
-    // use FILE * so we don't have to re-open the file to mmap
-    FILE * fp;
-    HANDLE fp_win32;
-    size_t size;
-
-private:
-    std::string GetErrorMessageWin32(DWORD error_code) const {
-        std::string ret;
-        LPSTR lpMsgBuf = NULL;
-        DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
-                                    NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL);
-        if (!bufLen) {
-            ret = format("Win32 error code: %lx", error_code);
-        } else {
-            ret = lpMsgBuf;
-            LocalFree(lpMsgBuf);
-        }
-
-        return ret;
-    }
-
-public:
-
-    llama_file(const char * fname, const char * mode) {
-        fp = ggml_fopen(fname, mode);
-        if (fp == NULL) {
-            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
-        }
-        fp_win32 = (HANDLE) _get_osfhandle(_fileno(fp));
-        seek(0, SEEK_END);
-        size = tell();
-        seek(0, SEEK_SET);
-    }
-
-    size_t tell() const {
-        // SetFilePointerEx returns the current position when seeking relative 0 bytes
-        LARGE_INTEGER li;
-        li.QuadPart = 0;
-        BOOL ret = SetFilePointerEx(fp_win32, li, &li, FILE_CURRENT);
-        if (!ret) {
-            throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-        }
-
-        return li.QuadPart;
-    }
-
-    void seek(size_t offset, int whence) const {
-        // no need to convert SEEK_* to FILE_*. The enums are the same.
-        // Still, keep static asserts to avoid failures in the future.
-        static_assert(SEEK_SET == FILE_BEGIN, "SEEK_SET != FILE_BEGIN");
-        static_assert(SEEK_CUR == FILE_CURRENT, "SEEK_CUR != FILE_CURRENT");
-        static_assert(SEEK_END == FILE_END, "SEEK_END != FILE_END");
-
-        LARGE_INTEGER li;
-        li.QuadPart = offset;
-        BOOL ret = SetFilePointerEx(fp_win32, li, NULL, whence);
-        if (!ret) {
-            throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-        }
-    }
-
-    void read_raw(void * ptr, size_t len) const {
-        // On Win32 ReadFile is significantly faster than fread, which is again significantly faster than std::fstream. Thus
-        // use the Win32 API to do file io instead of the C/C++ library functions.
-
-        // There are conditions under which ReadFile cannot read chunks >64MB.
-        // Thus split the operation into smaller chunks if len exceeds this limit.
-        size_t bytes_read = 0;
-        while (bytes_read < len) {
-            size_t chunk_size = std::min<size_t>(len - bytes_read, 64*1024*1024);
-            DWORD chunk_read = 0;
-            BOOL result = ReadFile(fp_win32, reinterpret_cast<char*>(ptr) + bytes_read, chunk_size, &chunk_read, NULL);
-            if (!result) {
-                throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-            }
-            if (chunk_read < chunk_size || chunk_read == 0) {
-                throw std::runtime_error("unexpectedly reached end of file");
-            }
-
-            bytes_read += chunk_read;
-        } ;
-    }
-
-    uint32_t read_u32() const {
-        uint32_t val;
-        read_raw(&val, sizeof(val));
-        return val;
-    }
-
-    void write_raw(const void * ptr, size_t len) const {
-        // There are conditions under which WriteFile cannot write chunks >64MB.
-        // Thus split the operation into smaller chunks if len exceeds this limit.
-        size_t bytes_written = 0;
-        while (bytes_written < len) {
-            size_t chunk_size = std::min<size_t>(len - bytes_written, 64*1024*1024);
-            DWORD chunk_written = 0;
-            BOOL result = WriteFile(fp_win32, reinterpret_cast<char const*>(ptr) + bytes_written, chunk_size, &chunk_written, NULL);
-            if (!result) {
-                throw std::runtime_error(format("write error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-            }
-            if (chunk_written < chunk_size || chunk_written == 0) {
-                throw std::runtime_error("unexpectedly failed to write bytes");
-            }
-
-            bytes_written += chunk_written;
-        }
-    }
-
-    void write_u32(std::uint32_t val) const {
-        write_raw(&val, sizeof(val));
-    }
-
-    ~llama_file() {
-        if (fp) {
-            std::fclose(fp);
-        }
-    }
-#else
-    // use FILE * so we don't have to re-open the file to mmap
-    FILE * fp;
-    size_t size;
-
-    llama_file(const char * fname, const char * mode) {
-        fp = ggml_fopen(fname, mode);
-        if (fp == NULL) {
-            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
-        }
-        seek(0, SEEK_END);
-        size = tell();
-        seek(0, SEEK_SET);
-    }
-
-    size_t tell() const {
-#ifdef _WIN32
-        __int64 ret = _ftelli64(fp);
-#else
-        long ret = std::ftell(fp);
-#endif
-        if (ret == -1) {
-            throw std::runtime_error(format("ftell error: %s", strerror(errno)));
-        }
-
-        return (size_t) ret;
-    }
-
-    void seek(size_t offset, int whence) const {
-#ifdef _WIN32
-        int ret = _fseeki64(fp, (__int64) offset, whence);
-#else
-        int ret = std::fseek(fp, (long) offset, whence);
-#endif
-        if (ret != 0) {
-            throw std::runtime_error(format("seek error: %s", strerror(errno)));
-        }
-    }
-
-    void read_raw(void * ptr, size_t len) const {
-        if (len == 0) {
-            return;
-        }
-        errno = 0;
-        std::size_t ret = std::fread(ptr, len, 1, fp);
-        if (ferror(fp)) {
-            throw std::runtime_error(format("read error: %s", strerror(errno)));
-        }
-        if (ret != 1) {
-            throw std::runtime_error("unexpectedly reached end of file");
-        }
-    }
-
-    uint32_t read_u32() const {
-        uint32_t ret;
-        read_raw(&ret, sizeof(ret));
-        return ret;
-    }
-
-    void write_raw(const void * ptr, size_t len) const {
-        if (len == 0) {
-            return;
-        }
-        errno = 0;
-        size_t ret = std::fwrite(ptr, len, 1, fp);
-        if (ret != 1) {
-            throw std::runtime_error(format("write error: %s", strerror(errno)));
-        }
-    }
-
-    void write_u32(std::uint32_t val) const {
-        write_raw(&val, sizeof(val));
-    }
-
-    ~llama_file() {
-        if (fp) {
-            std::fclose(fp);
-        }
-    }
-#endif
-};
-using llama_files = std::vector<std::unique_ptr<llama_file>>;
-
-struct llama_mmap {
-    void * addr;
-    size_t size;
-
-    llama_mmap(const llama_mmap &) = delete;
-
-#ifdef _POSIX_MAPPED_FILES
-    static constexpr bool SUPPORTED = true;
-
-    // list of mapped fragments (first_offset, last_offset)
-    std::vector<std::pair<size_t, size_t>> mapped_fragments;
-
-    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
-        size = file->size;
-        int fd = fileno(file->fp);
-        int flags = MAP_SHARED;
-        // prefetch/readahead impairs performance on NUMA systems
-        if (numa)  { prefetch = 0; }
-#ifdef __linux__
-        // advise the kernel to read the file sequentially (increases readahead)
-        if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
-            LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
-                    strerror(errno));
-        }
-        if (prefetch) { flags |= MAP_POPULATE; }
-#endif
-        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
-        if (addr == MAP_FAILED) { // NOLINT
-            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
-        }
-
-        if (prefetch > 0) {
-            // advise the kernel to preload the mapped memory
-            if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
-                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
-                        strerror(errno));
-            }
-        }
-        if (numa) {
-            // advise the kernel not to use readahead
-            // (because the next page might not belong on the same node)
-            if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
-                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
-                        strerror(errno));
-            }
-        }
-
-        // initialize list of mapped_fragments
-        mapped_fragments.emplace_back(0, file->size);
-    }
-
-    static void align_range(size_t * first, size_t * last, size_t page_size) {
-        // align first to the next page
-        size_t offset_in_page = *first & (page_size - 1);
-        size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
-        *first += offset_to_page;
-
-        // align last to the previous page
-        *last = *last & ~(page_size - 1);
-
-        if (*last <= *first) {
-            *last = *first;
-        }
-    }
-
-    // partially unmap the file in the range [first, last)
-    void unmap_fragment(size_t first, size_t last) {
-        // note: this function must not be called multiple times with overlapping ranges
-        // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
-        int page_size = sysconf(_SC_PAGESIZE);
-        align_range(&first, &last, page_size);
-        size_t len = last - first;
-
-        if (len == 0) {
-            return;
-        }
-
-        GGML_ASSERT(first % page_size == 0);
-        GGML_ASSERT(last % page_size == 0);
-        GGML_ASSERT(last > first);
-
-        void * next_page_start = (uint8_t *) addr + first;
-
-        // unmap the range
-        if (munmap(next_page_start, len)) {
-            LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
-        }
-
-        // update the list of mapped fragments to avoid unmapping the same range again in the destructor
-        std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
-        for (const auto & frag : mapped_fragments) {
-            if (frag.first < first && frag.second > last) {
-                // the range is in the middle of the fragment, split it
-                new_mapped_fragments.emplace_back(frag.first, first);
-                new_mapped_fragments.emplace_back(last, frag.second);
-            } else if (frag.first < first && frag.second > first) {
-                // the range starts in the middle of the fragment
-                new_mapped_fragments.emplace_back(frag.first, first);
-            } else if (frag.first < last && frag.second > last) {
-                // the range ends in the middle of the fragment
-                new_mapped_fragments.emplace_back(last, frag.second);
-            } else if (frag.first >= first && frag.second <= last) {
-                // the range covers the entire fragment
-            } else {
-                // the range is outside the fragment
-                new_mapped_fragments.push_back(frag);
-            }
-        }
-        mapped_fragments = std::move(new_mapped_fragments);
-    }
-
-    ~llama_mmap() {
-        for (const auto & frag : mapped_fragments) {
-            if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
-                LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
-            }
-        }
-    }
-#elif defined(_WIN32)
-    static constexpr bool SUPPORTED = true;
-
-    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
-        GGML_UNUSED(numa);
-
-        size = file->size;
-
-        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
-
-        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
-
-        if (hMapping == NULL) {
-            DWORD error = GetLastError();
-            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
-        }
-
-        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
-        DWORD error = GetLastError();
-        CloseHandle(hMapping);
-
-        if (addr == NULL) {
-            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
-        }
-
-        if (prefetch > 0) {
-#if _WIN32_WINNT >= 0x602
-            // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
-            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
-            HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
-
-            // may fail on pre-Windows 8 systems
-            pPrefetchVirtualMemory = (decltype(pPrefetchVirtualMemory))(void *) GetProcAddress(hKernel32, "PrefetchVirtualMemory");
-
-            if (pPrefetchVirtualMemory) {
-                // advise the kernel to preload the mapped memory
-                WIN32_MEMORY_RANGE_ENTRY range;
-                range.VirtualAddress = addr;
-                range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
-                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
-                    LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
-                            llama_format_win_err(GetLastError()).c_str());
-                }
-            }
-#else
-            throw std::runtime_error("PrefetchVirtualMemory unavailable");
-#endif
-        }
-    }
-
-    void unmap_fragment(size_t first, size_t last) {
-        // not supported
-        GGML_UNUSED(first);
-        GGML_UNUSED(last);
-    }
-
-    ~llama_mmap() {
-        if (!UnmapViewOfFile(addr)) {
-            LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
-                    llama_format_win_err(GetLastError()).c_str());
-        }
-    }
-#else
-    static constexpr bool SUPPORTED = false;
-
-    llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
-        GGML_UNUSED(file);
-        GGML_UNUSED(prefetch);
-        GGML_UNUSED(numa);
-
-        throw std::runtime_error("mmap not supported");
-    }
-
-    void unmap_fragment(size_t first, size_t last) {
-        GGML_UNUSED(first);
-        GGML_UNUSED(last);
-
-        throw std::runtime_error("mmap not supported");
-    }
-#endif
-};
-using llama_mmaps = std::vector<std::unique_ptr<llama_mmap>>;
-
-// Represents some region of memory being locked using mlock or VirtualLock;
-// will automatically unlock on destruction.
-struct llama_mlock {
-    void * addr = NULL;
-    size_t size = 0;
-
-    bool failed_already = false;
-
-    llama_mlock() {}
-    llama_mlock(const llama_mlock &) = delete;
-
-    ~llama_mlock() {
-        if (size) {
-            raw_unlock(addr, size);
-        }
-    }
-
-    void init(void * ptr) {
-        GGML_ASSERT(addr == NULL && size == 0); // NOLINT
-        addr = ptr;
-    }
-
-    void grow_to(size_t target_size) {
-        GGML_ASSERT(addr);
-        if (failed_already) {
-            return;
-        }
-        size_t granularity = lock_granularity();
-        target_size = (target_size + granularity - 1) & ~(granularity - 1);
-        if (target_size > size) {
-            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
-                size = target_size;
-            } else {
-                failed_already = true;
-            }
-        }
-    }
-
-#ifdef _POSIX_MEMLOCK_RANGE
-    static constexpr bool SUPPORTED = true;
-
-    static size_t lock_granularity() {
-        return (size_t) sysconf(_SC_PAGESIZE);
-    }
-
-    #ifdef __APPLE__
-        #define MLOCK_SUGGESTION \
-            "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
-            "decreasing 'vm.global_no_user_wire_amount'.  Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
-    #else
-        #define MLOCK_SUGGESTION \
-            "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
-    #endif
-
-    bool raw_lock(const void * addr, size_t size) const {
-        if (!mlock(addr, size)) {
-            return true;
-        }
-
-        char* errmsg = std::strerror(errno);
-        bool suggest = (errno == ENOMEM);
-
-        // Check if the resource limit is fine after all
-        struct rlimit lock_limit;
-        if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
-            suggest = false;
-        }
-        if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
-            suggest = false;
-        }
-
-        LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
-                size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
-        return false;
-    }
-
-    #undef MLOCK_SUGGESTION
-
-    static void raw_unlock(void * addr, size_t size) {
-        if (munlock(addr, size)) {
-            LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
-        }
-    }
-#elif defined(_WIN32)
-    static constexpr bool SUPPORTED = true;
-
-    static size_t lock_granularity() {
-        SYSTEM_INFO si;
-        GetSystemInfo(&si);
-        return (size_t) si.dwPageSize;
-    }
-
-    bool raw_lock(void * ptr, size_t len) const {
-        for (int tries = 1; ; tries++) {
-            if (VirtualLock(ptr, len)) {
-                return true;
-            }
-            if (tries == 2) {
-                LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
-                    len, size, llama_format_win_err(GetLastError()).c_str());
-                return false;
-            }
-
-            // It failed but this was only the first try; increase the working
-            // set size and try again.
-            SIZE_T min_ws_size, max_ws_size;
-            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
-                LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
-                        llama_format_win_err(GetLastError()).c_str());
-                return false;
-            }
-            // Per MSDN: "The maximum number of pages that a process can lock
-            // is equal to the number of pages in its minimum working set minus
-            // a small overhead."
-            // Hopefully a megabyte is enough overhead:
-            size_t increment = len + 1048576;
-            // The minimum must be <= the maximum, so we need to increase both:
-            min_ws_size += increment;
-            max_ws_size += increment;
-            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
-                LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
-                        llama_format_win_err(GetLastError()).c_str());
-                return false;
-            }
-        }
-    }
-
-    static void raw_unlock(void * ptr, size_t len) {
-        if (!VirtualUnlock(ptr, len)) {
-            LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
-                    llama_format_win_err(GetLastError()).c_str());
-        }
-    }
-#else
-    static constexpr bool SUPPORTED = false;
-
-    static size_t lock_granularity() {
-        return (size_t) 65536;
-    }
-
-    bool raw_lock(const void * addr, size_t len) const {
-        LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
-        return false;
-    }
-
-    static void raw_unlock(const void * addr, size_t len) {}
-#endif
-};
-using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
-
-// NOTE: avoid ever using this except for building the token_to_piece caches
-static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
-    std::string piece;
-    piece.resize(piece.capacity());  // using string internal cache
-    const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
-    if (n_chars < 0) {
-        piece.resize(-n_chars);
-        int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
-        GGML_ASSERT(check == -n_chars);
-    }
-    else {
-        piece.resize(n_chars);
-    }
-
-    return piece;
-}
-
-//
-// globals
-//
-
-struct llama_logger_state {
-    ggml_log_callback log_callback = llama_log_callback_default;
-    void * log_callback_user_data = nullptr;
-};
-
-static llama_logger_state g_logger_state;
-
-// available llama models
-enum e_model {
-    MODEL_UNKNOWN,
-    MODEL_14M,
-    MODEL_17M,
-    MODEL_22M,
-    MODEL_33M,
-    MODEL_60M,
-    MODEL_70M,
-    MODEL_80M,
-    MODEL_109M,
-    MODEL_137M,
-    MODEL_160M,
-    MODEL_220M,
-    MODEL_250M,
-    MODEL_270M,
-    MODEL_335M,
-    MODEL_410M,
-    MODEL_450M,
-    MODEL_770M,
-    MODEL_780M,
-    MODEL_0_5B,
-    MODEL_1B,
-    MODEL_1_3B,
-    MODEL_1_4B,
-    MODEL_1_5B,
-    MODEL_1_6B,
-    MODEL_2B,
-    MODEL_2_8B,
-    MODEL_3B,
-    MODEL_4B,
-    MODEL_6B,
-    MODEL_6_9B,
-    MODEL_7B,
-    MODEL_8B,
-    MODEL_9B,
-    MODEL_11B,
-    MODEL_12B,
-    MODEL_13B,
-    MODEL_14B,
-    MODEL_15B,
-    MODEL_16B,
-    MODEL_20B,
-    MODEL_30B,
-    MODEL_32B,
-    MODEL_34B,
-    MODEL_35B,
-    MODEL_40B,
-    MODEL_65B,
-    MODEL_70B,
-    MODEL_236B,
-    MODEL_314B,
-    MODEL_SMALL,
-    MODEL_MEDIUM,
-    MODEL_LARGE,
-    MODEL_XL,
-    MODEL_A1_7B,
-    MODEL_A2_7B,
-    MODEL_8x7B,
-    MODEL_8x22B,
-    MODEL_16x12B,
-    MODEL_10B_128x3_66B,
-    MODEL_57B_A14B,
-    MODEL_27B,
-};
-
-static const size_t kiB = 1024;
-static const size_t MiB = 1024*kiB;
-static const size_t GiB = 1024*MiB;
-
-struct llama_hparams_posnet {
-    uint32_t n_embd;
-    uint32_t n_layer;
-};
-
-struct llama_hparams_convnext {
-    uint32_t n_embd;
-    uint32_t n_layer;
-};
-
-struct llama_hparams {
-    bool vocab_only;
-    bool rope_finetuned;
-    bool use_par_res;
-    bool swin_norm;
-
-    uint32_t n_vocab = 0;
-    uint32_t n_ctx_train; // context size the model was trained on
-    uint32_t n_embd;
-    uint32_t n_embd_features = 0;
-    uint32_t n_layer;
-    uint32_t n_rot;
-    uint32_t n_swa = 0; // sliding window attention (SWA)
-    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
-    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
-    uint32_t n_expert = 0;
-    uint32_t n_expert_used = 0;
-    uint32_t n_vocab_type = 0; // for BERT-style token types
-    uint32_t n_rel_attn_bkts = 0;
-
-    // for WavTokenizer
-    struct llama_hparams_posnet   posnet;
-    struct llama_hparams_convnext convnext;
-
-    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
-    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
-    std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
-
-    uint32_t n_layer_dense_lead = 0;
-    uint32_t n_lora_q = 0;
-    uint32_t n_lora_kv = 0;
-    uint32_t n_ff_exp = 0;
-    uint32_t n_ff_shexp = 0;
-    uint32_t n_expert_shared = 0;
-    float    expert_weights_scale = 0.0;
-
-    float f_norm_eps;
-    float f_norm_rms_eps;
-    float f_norm_group_eps;
-
-    uint32_t n_norm_groups;
-
-    float f_attn_logit_softcapping = 50.0f;
-    float f_final_logit_softcapping = 30.0f;
-
-    // for RWKV
-    uint32_t rescale_every_n_layers = 0;
-    uint32_t time_mix_extra_dim = 0;
-    uint32_t time_decay_extra_dim = 0;
-    uint32_t wkv_head_size = 0;
-
-    float     rope_attn_factor = 1.0f;
-    float     rope_freq_base_train;
-    float     rope_freq_scale_train;
-    uint32_t  n_ctx_orig_yarn;
-    float     rope_yarn_log_mul;
-    int       rope_sections[4];
-
-    // for State Space Models
-    uint32_t ssm_d_conv  = 0;
-    uint32_t ssm_d_inner = 0;
-    uint32_t ssm_d_state = 0;
-    uint32_t ssm_dt_rank = 0;
-    bool ssm_dt_b_c_rms = false;
-
-    float f_clamp_kqv      = 0.0f;
-    float f_max_alibi_bias = 0.0f;
-    float f_logit_scale    = 0.0f;
-
-    // Additional scale factors (Granite/Granite MoE)
-    float f_residual_scale  = 0.0f;
-    float f_embedding_scale = 0.0f;
-    float f_attention_scale = 0.0f;
-
-    bool causal_attn   = true;
-    bool use_alibi     = false;
-    bool attn_soft_cap = false;
-
-    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
-    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
-    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
-
-    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
-    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
-    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
-
-    uint32_t n_head(uint32_t il = 0) const {
-        if (il < n_layer) {
-            return n_head_arr[il];
-        }
-
-        GGML_ABORT("fatal error");
-    }
-
-    uint32_t n_head_kv(uint32_t il = 0) const {
-        if (il < n_layer) {
-            return n_head_kv_arr[il];
-        }
-
-        GGML_ABORT("fatal error");
-    }
-
-    uint32_t n_ff(uint32_t il = 0) const {
-        if (il < n_layer) {
-            return n_ff_arr[il];
-        }
-
-        GGML_ABORT("fatal error");
-    }
-
-    uint32_t n_gqa(uint32_t il = 0) const {
-        const uint32_t n_head    = this->n_head(il);
-        const uint32_t n_head_kv = this->n_head_kv(il);
-
-        if (n_head_kv == 0) {
-            return 0;
-        }
-
-        return n_head/n_head_kv;
-    }
-
-    uint32_t n_embd_k_gqa(uint32_t il = 0) const { // dimension of key embeddings across all k-v heads
-        const uint32_t n_head_kv = this->n_head_kv(il);
-
-        return n_embd_head_k * n_head_kv;
-    }
-
-    uint32_t n_embd_v_gqa(uint32_t il = 0) const { // dimension of value embeddings across all k-v heads
-        const uint32_t n_head_kv = this->n_head_kv(il);
-
-        return n_embd_head_v * n_head_kv;
-    }
-
-    uint32_t n_embd_k_s() const { // dimension of the rolling state embeddings
-        // corresponds to Mamba's conv_states size or RWKV's token_shift states size
-        if (wkv_head_size != 0) {
-            // for RWKV models
-            return 2 * n_embd;
-        }
-
-        // TODO: maybe support other convolution strides than 1
-        // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
-        return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
-    }
-
-    uint32_t n_embd_v_s() const { // dimension of the recurrent state embeddings
-        if (wkv_head_size != 0) {
-            // corresponds to RWKV's wkv_states size
-            return n_embd * wkv_head_size;
-        }
-
-        // corresponds to Mamba's ssm_states size
-        return ssm_d_state * ssm_d_inner;
-    }
-};
-
-static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
-
-struct llama_cparams {
-    uint32_t n_ctx;           // context size used during inference
-    uint32_t n_batch;
-    uint32_t n_ubatch;
-    uint32_t n_seq_max;
-    int      n_threads;       // number of threads to use for generation
-    int      n_threads_batch; // number of threads to use for batch processing
-
-    float rope_freq_base;
-    float rope_freq_scale;
-
-    uint32_t n_ctx_orig_yarn;
-    // These hyperparameters are not exposed in GGUF, because all
-    // existing YaRN models use the same values for them.
-    float yarn_ext_factor;
-    float yarn_attn_factor;
-    float yarn_beta_fast;
-    float yarn_beta_slow;
-    float defrag_thold;
-
-    bool embeddings;
-    bool causal_attn;
-    bool offload_kqv;
-    bool flash_attn;
-    bool no_perf;
-
-    enum llama_pooling_type pooling_type;
-
-    ggml_backend_sched_eval_callback cb_eval;
-    void * cb_eval_user_data;
-};
-
-struct llama_layer_posnet {
-    // resnet
-    struct ggml_tensor * norm1   = nullptr;
-    struct ggml_tensor * norm1_b = nullptr;
-
-    struct ggml_tensor * conv1   = nullptr;
-    struct ggml_tensor * conv1_b = nullptr;
-
-    struct ggml_tensor * norm2   = nullptr;
-    struct ggml_tensor * norm2_b = nullptr;
-
-    struct ggml_tensor * conv2   = nullptr;
-    struct ggml_tensor * conv2_b = nullptr;
-
-    // attention
-    struct ggml_tensor * attn_norm   = nullptr;
-    struct ggml_tensor * attn_norm_b = nullptr;
-
-    struct ggml_tensor * attn_q   = nullptr;
-    struct ggml_tensor * attn_q_b = nullptr;
-
-    struct ggml_tensor * attn_k   = nullptr;
-    struct ggml_tensor * attn_k_b = nullptr;
-
-    struct ggml_tensor * attn_v   = nullptr;
-    struct ggml_tensor * attn_v_b = nullptr;
-
-    struct ggml_tensor * attn_o   = nullptr;
-    struct ggml_tensor * attn_o_b = nullptr;
-
-    // normalize
-    struct ggml_tensor * norm   = nullptr;
-    struct ggml_tensor * norm_b = nullptr;
-};
-
-struct llama_layer_convnext {
-    struct ggml_tensor * dw   = nullptr;
-    struct ggml_tensor * dw_b = nullptr;
-
-    struct ggml_tensor * norm   = nullptr;
-    struct ggml_tensor * norm_b = nullptr;
-
-    struct ggml_tensor * pw1   = nullptr;
-    struct ggml_tensor * pw1_b = nullptr;
-
-    struct ggml_tensor * pw2   = nullptr;
-    struct ggml_tensor * pw2_b = nullptr;
-
-    struct ggml_tensor * gamma = nullptr;
-};
-
-struct llama_layer {
-    // normalization
-    struct ggml_tensor * attn_norm       = nullptr;
-    struct ggml_tensor * attn_norm_b     = nullptr;
-    struct ggml_tensor * attn_norm_2     = nullptr;
-    struct ggml_tensor * attn_norm_2_b   = nullptr;
-    struct ggml_tensor * attn_q_norm     = nullptr;
-    struct ggml_tensor * attn_q_norm_b   = nullptr;
-    struct ggml_tensor * attn_k_norm     = nullptr;
-    struct ggml_tensor * attn_k_norm_b   = nullptr;
-    struct ggml_tensor * attn_out_norm   = nullptr;
-    struct ggml_tensor * attn_out_norm_b = nullptr;
-    struct ggml_tensor * attn_q_a_norm   = nullptr;
-    struct ggml_tensor * attn_kv_a_norm  = nullptr;
-    struct ggml_tensor * attn_sub_norm   = nullptr;
-    struct ggml_tensor * attn_post_norm  = nullptr;
-    struct ggml_tensor * ffn_sub_norm    = nullptr;
-    struct ggml_tensor * attn_norm_cross = nullptr;
-    struct ggml_tensor * attn_norm_enc   = nullptr;
-
-    // attention
-    struct ggml_tensor * wq        = nullptr;
-    struct ggml_tensor * wk        = nullptr;
-    struct ggml_tensor * wv        = nullptr;
-    struct ggml_tensor * wo        = nullptr;
-    struct ggml_tensor * wqkv      = nullptr;
-    struct ggml_tensor * wq_a      = nullptr;
-    struct ggml_tensor * wq_b      = nullptr;
-    struct ggml_tensor * wkv_a_mqa = nullptr;
-    struct ggml_tensor * wkv_b     = nullptr;
-    struct ggml_tensor * wq_cross  = nullptr;
-    struct ggml_tensor * wk_cross  = nullptr;
-    struct ggml_tensor * wv_cross  = nullptr;
-    struct ggml_tensor * wo_cross  = nullptr;
-    struct ggml_tensor * wq_enc    = nullptr;
-    struct ggml_tensor * wk_enc    = nullptr;
-    struct ggml_tensor * wv_enc    = nullptr;
-    struct ggml_tensor * wo_enc    = nullptr;
-
-    // attention bias
-    struct ggml_tensor * bq   = nullptr;
-    struct ggml_tensor * bk   = nullptr;
-    struct ggml_tensor * bv   = nullptr;
-    struct ggml_tensor * bo   = nullptr;
-    struct ggml_tensor * bqkv = nullptr;
-
-    // relative position bias
-    struct ggml_tensor * attn_rel_b       = nullptr;
-    struct ggml_tensor * attn_rel_b_enc   = nullptr;
-    struct ggml_tensor * attn_rel_b_cross = nullptr;
-
-    // normalization
-    struct ggml_tensor * ffn_norm         = nullptr;
-    struct ggml_tensor * ffn_norm_b       = nullptr;
-    struct ggml_tensor * ffn_post_norm    = nullptr;
-    struct ggml_tensor * layer_out_norm   = nullptr;
-    struct ggml_tensor * layer_out_norm_b = nullptr;
-    struct ggml_tensor * ffn_norm_exps    = nullptr;
-    struct ggml_tensor * ffn_norm_enc     = nullptr;
-
-    // ff
-    struct ggml_tensor * ffn_gate     = nullptr; // w1
-    struct ggml_tensor * ffn_down     = nullptr; // w2
-    struct ggml_tensor * ffn_up       = nullptr; // w3
-    struct ggml_tensor * ffn_gate_enc = nullptr;
-    struct ggml_tensor * ffn_down_enc = nullptr;
-    struct ggml_tensor * ffn_up_enc   = nullptr;
-
-    // ff MoE
-    struct ggml_tensor * ffn_gate_inp  = nullptr;
-    struct ggml_tensor * ffn_gate_exps = nullptr;
-    struct ggml_tensor * ffn_down_exps = nullptr;
-    struct ggml_tensor * ffn_up_exps   = nullptr;
-
-    // ff shared expert (shexp)
-    struct ggml_tensor * ffn_gate_inp_shexp = nullptr;
-    struct ggml_tensor * ffn_gate_shexp     = nullptr;
-    struct ggml_tensor * ffn_down_shexp     = nullptr;
-    struct ggml_tensor * ffn_up_shexp       = nullptr;
-
-    // ff bias
-    struct ggml_tensor * ffn_gate_b = nullptr;
-    struct ggml_tensor * ffn_down_b = nullptr; // b2
-    struct ggml_tensor * ffn_up_b   = nullptr; // b3
-    struct ggml_tensor * ffn_act    = nullptr;
-
-    // mamba proj
-    struct ggml_tensor * ssm_in  = nullptr;
-    struct ggml_tensor * ssm_x   = nullptr;
-    struct ggml_tensor * ssm_dt  = nullptr;
-    struct ggml_tensor * ssm_out = nullptr;
-
-    // mamba
-    struct ggml_tensor * ssm_conv1d = nullptr;
-    struct ggml_tensor * ssm_a      = nullptr;
-    struct ggml_tensor * ssm_d      = nullptr;
-
-    // mamba bias
-    struct ggml_tensor * ssm_conv1d_b = nullptr;
-    struct ggml_tensor * ssm_dt_b     = nullptr;
-
-    // rwkv
-    struct ggml_tensor * time_mix_w1         = nullptr;
-    struct ggml_tensor * time_mix_w2         = nullptr;
-    struct ggml_tensor * time_mix_lerp_x     = nullptr;
-    struct ggml_tensor * time_mix_lerp_w     = nullptr;
-    struct ggml_tensor * time_mix_lerp_k     = nullptr;
-    struct ggml_tensor * time_mix_lerp_v     = nullptr;
-    struct ggml_tensor * time_mix_lerp_r     = nullptr;
-    struct ggml_tensor * time_mix_lerp_g     = nullptr;
-
-    struct ggml_tensor * time_mix_first      = nullptr;
-    struct ggml_tensor * time_mix_decay      = nullptr;
-    struct ggml_tensor * time_mix_decay_w1   = nullptr;
-    struct ggml_tensor * time_mix_decay_w2   = nullptr;
-    struct ggml_tensor * time_mix_key        = nullptr;
-    struct ggml_tensor * time_mix_value      = nullptr;
-    struct ggml_tensor * time_mix_receptance = nullptr;
-    struct ggml_tensor * time_mix_gate       = nullptr;
-
-    struct ggml_tensor * time_mix_ln     = nullptr;
-    struct ggml_tensor * time_mix_ln_b   = nullptr;
-    struct ggml_tensor * time_mix_output = nullptr;
-
-    struct ggml_tensor * channel_mix_lerp_k = nullptr;
-    struct ggml_tensor * channel_mix_lerp_r = nullptr;
-
-    struct ggml_tensor * channel_mix_key        = nullptr;
-    struct ggml_tensor * channel_mix_receptance = nullptr;
-    struct ggml_tensor * channel_mix_value      = nullptr;
-
-    // long rope factors
-    struct ggml_tensor * rope_long  = nullptr;
-    struct ggml_tensor * rope_short = nullptr;
-    struct ggml_tensor * rope_freqs = nullptr;
-
-    // bitnet scale
-    struct ggml_tensor * wq_scale       = nullptr;
-    struct ggml_tensor * wk_scale       = nullptr;
-    struct ggml_tensor * wv_scale       = nullptr;
-    struct ggml_tensor * wo_scale       = nullptr;
-    struct ggml_tensor * ffn_gate_scale = nullptr;
-    struct ggml_tensor * ffn_up_scale   = nullptr;
-    struct ggml_tensor * ffn_down_scale = nullptr;
-
-    struct llama_layer_posnet posnet;
-
-    struct llama_layer_convnext convnext;
-};
-
-// very similar to llama_batch,
-// but has more metadata about sequences
-struct llama_ubatch {
-    bool equal_seqs;
-    // TODO: whole_seqs for embeddings?
-
-    uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs)
-    uint32_t n_seq_tokens; // tokens per sequence
-    uint32_t n_seqs;
-
-    llama_token  *  token;    // [n_tokens]
-    float        *  embd;     // [n_embd, n_tokens]
-    llama_pos    *  pos;      // [n_tokens]
-    int32_t      *  n_seq_id; // [n_seqs]
-    llama_seq_id ** seq_id;   // [n_seqs]
-    int8_t       *  output;   // [n_tokens]
-};
-
-struct llama_kv_cell {
-    llama_pos pos   = -1;
-    llama_pos delta = 0;
-    int32_t   src   = -1; // used by recurrent state models to copy states
-    int32_t   tail  = -1;
-
-    std::set<llama_seq_id> seq_id;
-
-    bool has_seq_id(const llama_seq_id & id) const {
-        return seq_id.find(id) != seq_id.end();
-    }
-
-    bool is_empty() const {
-        return seq_id.empty();
-    }
-
-    bool is_same_seq(const llama_kv_cell & other) const {
-        return seq_id == other.seq_id;
-    }
-};
-
-// ring-buffer of cached KV data
-struct llama_kv_cache {
-    bool has_shift = false;
-    bool do_defrag = false;
-    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
-    bool v_trans   = true;  // the value tensor is transposed
-
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_internal also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
-    // computed before each graph build
-    uint32_t n = 0;
-
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
-
-    std::vector<llama_kv_cell> cells;
-
-    std::vector<struct ggml_tensor *> k_l; // per layer
-    std::vector<struct ggml_tensor *> v_l;
-
-    std::vector<ggml_context_ptr> ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    size_t total_size() {
-        size_t size = 0;
-        for (auto & buf : bufs) {
-            size += ggml_backend_buffer_get_size(buf.get());
-        }
-        return size;
-    }
-};
-
-struct llama_control_vector {
-    std::vector<struct ggml_tensor *> tensors; // per layer
-    std::vector<ggml_context_ptr> ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    int32_t layer_start = -1;
-    int32_t layer_end   = -1;
-
-    struct ggml_tensor * tensor_for(int il) const {
-        if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
-            return nullptr;
-        }
-        return tensors[il];
-    }
-
-    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int  il) const {
-        ggml_tensor * layer_dir = tensor_for(il);
-        if (layer_dir != nullptr) {
-            cur = ggml_add(ctx, cur, layer_dir);
-        }
-        return cur;
-    }
-};
-
-struct llama_model {
-    e_model     type  = MODEL_UNKNOWN;
-    llm_arch    arch  = LLM_ARCH_UNKNOWN;
-    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
-
-    std::string name = "n/a";
-
-    llama_hparams hparams = {};
-    llama_vocab   vocab;
-
-    struct ggml_tensor * tok_embd = nullptr;
-    struct ggml_tensor * type_embd = nullptr;
-    struct ggml_tensor * pos_embd = nullptr;
-    struct ggml_tensor * tok_norm = nullptr;
-    struct ggml_tensor * tok_norm_b = nullptr;
-
-    struct ggml_tensor * output_norm = nullptr;
-    struct ggml_tensor * output_norm_b = nullptr;
-    struct ggml_tensor * output = nullptr;
-    struct ggml_tensor * output_b = nullptr;
-    struct ggml_tensor * output_norm_enc = nullptr;
-
-    // classifier
-    struct ggml_tensor * cls = nullptr;
-    struct ggml_tensor * cls_b = nullptr;
-    struct ggml_tensor * cls_out   = nullptr;
-    struct ggml_tensor * cls_out_b = nullptr;
-
-    struct ggml_tensor * conv1d = nullptr;
-    struct ggml_tensor * conv1d_b = nullptr;
-
-    std::vector<llama_layer> layers;
-
-    // gguf metadata
-    std::unordered_map<std::string, std::string> gguf_kv;
-
-    llama_split_mode split_mode;
-    int main_gpu;
-    int n_gpu_layers;
-
-    std::vector<std::string> rpc_servers;
-
-    // list of devices used in this model
-    std::vector<ggml_backend_dev_t> devices;
-
-
-    // lists of buffer types used for each layer
-    using buft_list_t = std::vector<std::pair<ggml_backend_dev_t, ggml_backend_buffer_type_t>>;
-    buft_list_t cpu_buft_list;
-    std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
-
-    struct layer_dev {
-        ggml_backend_dev_t dev;
-        buft_list_t * buft_list;
-    };
-    layer_dev dev_input = {};
-    layer_dev dev_output = {};
-    std::vector<layer_dev> dev_layer;
-
-    // contexts where the model tensors metadata is stored
-    std::vector<ggml_context_ptr> ctxs;
-
-    // the model memory buffers for the tensor data
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    // model memory mapped files
-    llama_mmaps mappings;
-
-    // objects representing data potentially being locked in memory
-    llama_mlocks mlock_bufs;
-    llama_mlocks mlock_mmaps;
-
-    // for quantize-stats only
-    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
-
-    int64_t t_load_us  = 0;
-    int64_t t_start_us = 0;
-
-    // total number of parameters in the model
-    uint64_t n_elements = 0;
-
-    // total size of all the tensors in the model in bytes
-    size_t  n_bytes     = 0;
-
-    // keep track of loaded lora adapters
-    std::set<struct llama_lora_adapter *> lora_adapters;
-
-    ~llama_model() {
-       while (!lora_adapters.empty()) {
-            llama_lora_adapter_free(*lora_adapters.begin());
-        }
-    }
-};
-
-struct llama_sbatch_seq {
-    int32_t n_seq_id;
-    llama_seq_id * seq_id;
-    size_t offset;
-    size_t length;
-};
-
-// sequence-length-aware batch splitting
-struct llama_sbatch {
-    // tokens left in this batch
-    size_t n_tokens;
-
-    size_t n_embd;
-
-    bool logits_all; // TODO: remove once lctx.logits_all is removed too
-
-    // sorted indices into the batch
-    std::vector<size_t> ids;
-    // batch indices of the output
-    std::vector<size_t> out_ids;
-    std::vector<llama_sbatch_seq> seq;
-
-    const llama_batch * batch = nullptr;
-
-    // buffers for the ubatch
-    std::vector<llama_token>    ubatch_token;
-    std::vector<float>          ubatch_embd;
-    std::vector<llama_pos>      ubatch_pos;
-    std::vector<int32_t>        ubatch_n_seq_id;
-    std::vector<llama_seq_id *> ubatch_seq_id;
-    std::vector<int8_t>         ubatch_output;
-
-    llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false) {
-        // clear empty sequences
-        // the previous ubatch is assumed to be gone,
-        // so nothing should refer to values in these sequences anymore.
-        for (size_t i = seq.size(); i-- > 0;) {
-            if (seq[i].length == 0) {
-                seq.pop_back();
-            } else {
-                break;
-            }
-        }
-        ubatch_token.resize(!has_embd ? n_ubatch : 0);
-        ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0);
-        ubatch_pos.resize(n_ubatch);
-        ubatch_n_seq_id.resize(n_ubatch);
-        ubatch_seq_id.resize(n_ubatch);
-        ubatch_output.resize(n_ubatch);
-        llama_ubatch ubatch = {
-            /*equal_seqs   =*/ true,
-            /*n_tokens     =*/ 0,
-            /*n_seq_tokens =*/ 0,
-            /*n_seqs       =*/ 0,
-            /*token        =*/ !has_embd ? ubatch_token.data() : nullptr,
-            /*embd         =*/ has_embd  ? ubatch_embd.data()  : nullptr,
-            /*pos          =*/ ubatch_pos.data(),
-            /*n_seq_id     =*/ ubatch_n_seq_id.data(),
-            /*seq_id       =*/ ubatch_seq_id.data(),
-            /*output       =*/ ubatch_output.data(),
-        };
-        return ubatch;
-    }
-
-    void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
-        GGML_ASSERT(batch != nullptr);
-        GGML_ASSERT(length <= seq.length);
-        // Can only add sequences of equal lengths to a batch,
-        // otherwise it isn't clear to which sequence a token belongs
-        GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
-        GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
-        // NOTE: loops are separated for cache-friendliness
-        if (batch->token) {
-            if (ubatch.equal_seqs) {
-                for (size_t i = 0; i < length; ++i) {
-                    ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
-                }
-            } else {
-                // simple split
-                ubatch.token = batch->token + seq.offset;
-            }
-        } else {
-            ubatch.token = nullptr;
-        }
-        if (batch->embd) {
-            if (ubatch.equal_seqs) {
-                for (size_t i = 0; i < length; ++i) {
-                    memcpy(
-                        ubatch.embd + n_embd * (ubatch.n_tokens + i),
-                        batch->embd + n_embd * ids[seq.offset + i],
-                        n_embd * sizeof(float)
-                    );
-                }
-            } else {
-                // simple split
-                ubatch.embd = batch->embd + (n_embd * seq.offset);
-            }
-        } else {
-            ubatch.embd = nullptr;
-        }
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
-            }
-        } else {
-            // simple split
-            ubatch.pos = batch->pos + seq.offset;
-        }
-        if (ubatch.equal_seqs) {
-            ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
-            if (seq.seq_id) {
-                ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
-            }
-        } else {
-            // simple split
-            if (batch->n_seq_id) {
-                ubatch.n_seq_id = batch->n_seq_id + seq.offset;
-            } else {
-                for (size_t i = 0; i < length; ++i) {
-                    ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
-                }
-            }
-            if (batch->seq_id) {
-                ubatch.seq_id = batch->seq_id + seq.offset;
-            }
-        }
-        if (logits_all) {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.output[ubatch.n_tokens + i] = 1;
-                out_ids.push_back(ids[seq.offset + i]);
-            }
-        } else if (batch->logits) {
-            if (ubatch.equal_seqs) {
-                for (size_t i = 0; i < length; ++i) {
-                    size_t id = ids[seq.offset + i];
-                    int8_t is_output = batch->logits[id];
-                    ubatch.output[ubatch.n_tokens + i] = is_output;
-                    if (is_output) { out_ids.push_back(id); }
-                }
-            } else {
-                // simple split
-                ubatch.output = batch->logits + seq.offset;
-                for (size_t i = 0; i < length; ++i) {
-                    if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
-                }
-            }
-        } else {
-            // only get last output
-            for (size_t i = 0; i < length; ++i) {
-                size_t id = ids[seq.offset + i];
-                int8_t is_last = id == ids.size() - 1;
-                ubatch.output[ubatch.n_tokens + i] = is_last;
-                if (is_last) { out_ids.push_back(id); }
-            }
-        }
-        if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
-            ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
-        }
-        ubatch.n_tokens += length;
-        ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
-        seq.offset += length;
-        seq.length -= length;
-        n_tokens -= length;
-        GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
-    }
-
-    // simple split, unknown number of sequences of unequal lengths
-    llama_ubatch split_simple(size_t n_ubatch) {
-        n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-        llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-        ubatch.equal_seqs = false;
-        if (!seq.empty()) {
-            llama_sbatch_seq & s = seq[0];
-            size_t length = s.length < n_ubatch ? s.length : n_ubatch;
-            GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
-            add_seq_to_ubatch(ubatch, s, length);
-        }
-        return ubatch;
-    }
-
-    // make batches of equal-length sequences
-    llama_ubatch split_equal(size_t n_ubatch) {
-        n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-        llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-        if (!seq.empty()) {
-            size_t length = 0;
-            size_t n_tokens_in_ubatch = 0;
-            GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
-            // smallest first, because it's easier to split this way;
-            // starting from the end to pop in constant time.
-            for (size_t i = seq.size(); i-- > 0;) {
-                llama_sbatch_seq & s = seq[i];
-                GGML_ASSERT(s.length > 0);
-                if (length == 0) {
-                    length = s.length < n_ubatch ? s.length : n_ubatch;
-                }
-                add_seq_to_ubatch(ubatch, s, length);
-                n_tokens_in_ubatch += length;
-                // shared prompts can't be mixed with any of their sequences,
-                // so it's safer to compute them in their own ubatch
-                if (s.n_seq_id > 1) { break; }
-                // stop when there isn't enough space for another sequence
-                if (length + n_tokens_in_ubatch > n_ubatch) { break; }
-            }
-        }
-        return ubatch;
-    }
-
-    // sequence-wise split
-    llama_ubatch split_seq(size_t n_ubatch) {
-        n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-        llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-        if (!seq.empty()) {
-            llama_sbatch_seq & s = seq[seq.size() - 1];
-            size_t length = s.length < n_ubatch ? s.length : n_ubatch;
-            GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
-            add_seq_to_ubatch(ubatch, s, length);
-        }
-        return ubatch;
-    }
-
-    void from_batch(const llama_batch & batch, const size_t n_embd, const bool simple_split = false, const bool logits_all = false) {
-        GGML_ASSERT(batch.n_tokens >= 0);
-        this->batch = &batch;
-        this->n_embd = n_embd;
-        this->logits_all = logits_all;
-
-        n_tokens = batch.n_tokens;
-        ids.resize(n_tokens);
-        out_ids.clear();
-        // TODO: reserve out_ids and seq
-
-        for (size_t i = 0; i < n_tokens; ++i) {
-            ids[i] = i;
-        }
-        if (simple_split) {
-            seq.resize(1);
-            llama_sbatch_seq & s = seq[0];
-            s.n_seq_id = 0;
-            s.seq_id = nullptr;
-            s.offset = 0;
-            s.length = n_tokens;
-            return;
-        }
-        std::sort(ids.begin(), ids.end(),
-            [&batch](size_t a, size_t b) {
-                int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
-                int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
-                // sort by seq_id, then by pos
-                if (n_seq_a == n_seq_b) {
-                    if (batch.seq_id) {
-                        for (int32_t i = 0; i < n_seq_a; ++i) {
-                            llama_seq_id seq_id_a = batch.seq_id[a][i];
-                            llama_seq_id seq_id_b = batch.seq_id[b][i];
-                            // smaller seq_ids go first
-                            if (seq_id_a != seq_id_b) {
-                                return seq_id_a < seq_id_b;
-                            }
-                        }
-                    }
-                    // when all else is equal, sort by pos
-                    if (batch.pos) {
-                        return batch.pos[a] < batch.pos[b];
-                    }
-                    // no pos, sort by id
-                    return a < b;
-                }
-                // shared prompts go first
-                return n_seq_a > n_seq_b;
-            }
-        );
-        // init seq
-        llama_sbatch_seq * last_seq = nullptr;
-
-        for (size_t i = 0; i < n_tokens; ++i) {
-            const size_t bi = ids[i];
-            const int32_t n_seqs = batch.n_seq_id[bi];
-            llama_seq_id * seq_ids = batch.seq_id[bi];
-            if (last_seq != nullptr) {
-                bool same = n_seqs == last_seq->n_seq_id;
-                for (int32_t j = 0; same && j < n_seqs; ++j) {
-                    if (seq_ids[j] != last_seq->seq_id[j]) {
-                        same = false;
-                    }
-                }
-                if (same) {
-                    last_seq->length += 1;
-                    continue;
-                }
-            }
-            llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
-            seq.push_back(new_seq);
-            last_seq = &seq.back();
-        }
-        // keep shared prompts first at the end, then sort by length descending.
-        std::sort(seq.begin(), seq.end(),
-            [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
-                if (a.n_seq_id == b.n_seq_id) {
-                    return a.length > b.length;
-                }
-                return a.n_seq_id < b.n_seq_id;
-            }
-        );
-    }
-};
-
-struct llama_context {
-    llama_context(const llama_model & model)
-        : model(model)
-        , t_start_us(model.t_start_us)
-        , t_load_us(model.t_load_us) {}
-
-    const struct llama_model & model;
-
-    struct llama_cparams        cparams;
-    struct llama_sbatch         sbatch;
-    struct llama_kv_cache       kv_self;
-    struct llama_control_vector cvec;
-
-    std::unordered_map<struct llama_lora_adapter *, float> lora_adapters;
-
-    std::vector<ggml_backend_ptr> backends;
-    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;
-
-    ggml_backend_t backend_cpu = nullptr;
-
-    ggml_threadpool_t threadpool       = nullptr;
-    ggml_threadpool_t threadpool_batch = nullptr;
-
-    bool has_evaluated_once = false;
-
-    mutable int64_t t_start_us;
-    mutable int64_t t_load_us;
-    mutable int64_t t_p_eval_us = 0;
-    mutable int64_t t_eval_us   = 0;
-
-    mutable int64_t t_compute_start_us = 0;
-    mutable int64_t n_queued_tokens = 0;
-
-    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
-    mutable int32_t n_eval   = 0; // number of eval calls
-
-    // host buffer for the model output (logits and embeddings)
-    ggml_backend_buffer_ptr buf_output;
-
-    // decode output (2-dimensional array: [n_outputs][n_vocab])
-    size_t  logits_size = 0; // capacity (of floats) for logits
-    float * logits      = nullptr;
-
-    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
-    size_t  output_size = 0; // capacity (of tokens positions) for the output buffers
-    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch or last logical batch
-
-    bool logits_all = false;
-
-    // embeddings output (2-dimensional array: [n_outputs][n_embd])
-    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
-    size_t  embd_size = 0; // capacity (of floats) for embeddings
-    float * embd      = nullptr;
-
-    // sequence embeddings output (map of [n_embd] vectors)
-    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
-    std::map<llama_seq_id, std::vector<float>> embd_seq;
-
-    // whether we are computing encoder output or decoder output
-    bool is_encoding = false;
-
-    // TODO: find a better way to accommodate multi-dimensional position encoding methods
-    // number of position ids each token gets, 1 for each token in most cases.
-    // when using m-rope, it will be 3 position ids per token to represent a 3-dimensional coordinate.
-    int n_pos_per_token = 1;
-
-    // output of the encoder part of the encoder-decoder models
-    std::vector<float> embd_enc;
-    std::vector<std::set<llama_seq_id>> seq_ids_enc;
-
-    // memory buffers used to evaluate the model
-    std::vector<uint8_t> buf_compute_meta;
-    ggml_backend_sched_ptr sched;
-
-    ggml_abort_callback abort_callback      = nullptr;
-    void *              abort_callback_data = nullptr;
-
-    // input tensors
-    struct ggml_tensor * inp_tokens;      // I32 [n_batch]
-    struct ggml_tensor * inp_embd;        // F32 [n_embd, n_batch]
-    struct ggml_tensor * inp_pos;         // I32 [n_batch]
-    struct ggml_tensor * inp_out_ids;     // I32 [n_outputs]
-    struct ggml_tensor * inp_KQ_mask;     // F32 [kv_size, n_batch]
-    struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
-    struct ggml_tensor * inp_K_shift;     // I32 [kv_size]
-    struct ggml_tensor * inp_mean;        // F32 [n_batch, n_batch]
-    struct ggml_tensor * inp_cls;         // I32 [n_batch]
-    struct ggml_tensor * inp_s_copy;      // I32 [kv_size]
-    struct ggml_tensor * inp_s_mask;      // F32 [1, n_kv]
-    struct ggml_tensor * inp_s_seq;       // I32 [n_kv, n_batch]
-    struct ggml_tensor * inp_pos_bucket;    // I32 [n_batch|n_kv, n_batch]
-    struct ggml_tensor * inp_embd_enc;      // F32 [n_embd, n_outputs_enc]
-    struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
-};
-
-struct llama_lora_weight {
-    struct ggml_tensor * a = nullptr;
-    struct ggml_tensor * b = nullptr;
-    llama_lora_weight() = default;
-    llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b): a(a), b(b) {}
-};
-
-struct llama_lora_adapter {
-    struct llama_model * base_model;
-    // map tensor name to lora_a_b
-    std::unordered_map<std::string, struct llama_lora_weight> ab_map;
-    std::vector<ggml_context_ptr> ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    float alpha;
-
-    llama_lora_adapter(struct llama_model * base_model): base_model(base_model) {
-        base_model->lora_adapters.insert(this);
-    }
-
-    llama_lora_weight * get_weight(struct ggml_tensor * w) {
-        std::string name(w->name);
-        auto pos = ab_map.find(name);
-        if (ab_map.find(name) != ab_map.end()) {
-            return &pos->second;
-        }
-        return nullptr;
-    }
-
-    ~llama_lora_adapter() {
-        auto pos = base_model->lora_adapters.find(this);
-        if (pos != base_model->lora_adapters.end()) {
-            base_model->lora_adapters.erase(pos);
-        }
-    }
-};
-
 static int llama_get_device_count(const llama_model & model) {
     return (int) model.devices.size();
 }
 
-static struct ggml_tensor * llama_get_model_tensor(const struct llama_model * model, const char * name) {
-    auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
-            [name](const std::pair<std::string, struct ggml_tensor *> & it) {
-                return it.first == name;
-            });
-    if (it == model->tensors_by_name.end()) {
-        return nullptr;
-    }
-    return it->second;
-}
-
-template<typename F>
-static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
-    ggml_init_params params = {
-        /*.mem_size   =*/ ggml_tensor_overhead()*8,
-        /*.mem_buffer =*/ NULL,
-        /*.no_alloc   =*/ true,
-    };
-    ggml_context_ptr ctx { ggml_init(params) };
-    if (!ctx) {
-        throw std::runtime_error(format("failed to create ggml context"));
-    }
-
-    ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
-    ggml_tensor * op_tensor = fn(ctx.get());
-    for (int i = 0; i < GGML_MAX_SRC; i++) {
-        if (op_tensor->src[i] != nullptr) {
-            assert(op_tensor->src[i]->buffer == nullptr);
-            op_tensor->src[i]->buffer = buf.get();
-        }
-    }
-    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
-
-    return op_supported;
-}
-
-template<typename F>
-static ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) {
-    for (const auto & cur : buft_list) {
-        ggml_backend_dev_t cur_dev = cur.first;
-        ggml_backend_buffer_type_t cur_buft = cur.second;
-        if (buft_supported(cur_buft, cur_dev, fn)) {
-            return cur_buft;
-        }
-    }
-    throw std::runtime_error(format("no suitable buffer type found"));
-}
-
-//
-// kv cache helpers
-//
-
-static bool llama_kv_cache_init(
-             struct llama_kv_cache & cache,
-               const llama_context * ctx,
-                         ggml_type   type_k,
-                         ggml_type   type_v,
-                          uint32_t   kv_size,
-                              bool   offload) {
-    const llama_model & model = ctx->model;
-    const llama_cparams & cparams = ctx->cparams;
-
-    const struct llama_hparams & hparams = model.hparams;
-
-    const int32_t n_layer = hparams.n_layer;
-
-    LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d\n", __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);
-
-    cache.has_shift = false;
-
-    cache.recurrent = llama_model_is_recurrent(&model);
-    cache.v_trans   = !cache.recurrent && !cparams.flash_attn;
-
-    cache.head = 0;
-    cache.size = kv_size;
-    cache.used = 0;
-
-    cache.type_k = type_k;
-    cache.type_v = type_v;
-
-    cache.cells.clear();
-    cache.cells.resize(kv_size);
-
-    // create a context for each buffer type
-    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            struct ggml_init_params params = {
-                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * ctx = ggml_init(params);
-            if (!ctx) {
-                return nullptr;
-            }
-            ctx_map[buft] = ctx;
-            cache.ctxs.emplace_back(ctx);
-            return ctx;
-        }
-        return it->second;
-    };
-
-    cache.k_l.reserve(n_layer);
-    cache.v_l.reserve(n_layer);
-
-    for (int i = 0; i < n_layer; i++) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
-
-        LLAMA_LOG_DEBUG("%s: layer %d: n_embd_k_gqa = %d, n_embd_v_gqa = %d\n", __func__, i, n_embd_k_gqa, n_embd_v_gqa);
-
-        ggml_backend_buffer_type_t buft;
-        if (offload) {
-            auto * dev = model.dev_layer.at(i).dev;
-            buft = ggml_backend_dev_buffer_type(dev);
-        } else {
-            buft = ggml_backend_cpu_buffer_type();
-        }
-        ggml_context * ctx = ctx_for_buft(buft);
-
-        if (!ctx) {
-            LLAMA_LOG_ERROR("%s: failed to create ggml context for kv cache\n", __func__);
-            return false;
-        }
-
-        ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
-        ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
-        ggml_format_name(k, "cache_k_l%d", i);
-        ggml_format_name(v, "cache_v_l%d", i);
-        cache.k_l.push_back(k);
-        cache.v_l.push_back(v);
-    }
-
-    // allocate tensors and initialize the buffers to avoid NaNs in the padding
-    for (auto it : ctx_map) {
-        auto * buft = it.first;
-        auto * ctx  = it.second;
-
-        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
-        if (!buf) {
-            LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
-            return false;
-        }
-        ggml_backend_buffer_clear(buf, 0);
-        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
-        cache.bufs.emplace_back(buf);
-    }
-
-    return true;
-}
-
-// a structure holds information about the slot found in llama_kv_cache_find_slot
-struct llama_kv_cache_slot_info {
-    std::pair<uint32_t, uint32_t> boundaries; // slot boundaries [begin, end)
-    bool found = false;                       // the slot was found
-
-    explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
-    llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
-
-    operator bool() const { return found; }
-};
-static const llama_kv_cache_slot_info llama_kv_cache_slot_info_failed{false};
-
-// find an empty slot of size "n_tokens" in the cache
-// updates the cache head
-// returns a structure holding information about the slot found
-// Note: On success, it's important that cache.head points
-// to the first cell of the slot.
-static struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
-           struct llama_kv_cache & cache,
-       const struct llama_ubatch & batch) {
-    const uint32_t n_tokens = batch.n_tokens;
-    const uint32_t n_seqs   = batch.n_seqs;
-    const uint32_t n_seq_tokens = batch.n_seq_tokens;
-
-    if (cache.recurrent) {
-        // For recurrent state architectures (like Mamba or RWKV),
-        // each cache cell can store the state for a whole sequence.
-        // A slot should always be contiguous.
-
-        // can only process batches with an equal number of new tokens in each sequence
-        GGML_ASSERT(batch.equal_seqs);
-
-        int32_t min = cache.size - 1;
-        int32_t max = 0;
-
-        // everything should fit if all seq_ids are smaller than the max
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const uint32_t n_seq_id = batch.n_seq_id[s];
-            for (uint32_t j = 0; j < n_seq_id; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
-
-                if (seq_id < 0 || (uint32_t) seq_id >= cache.size) {
-                    // too big seq_id
-                    // TODO: would it be possible to resize the cache instead?
-                    LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, cache.size);
-                    return llama_kv_cache_slot_info_failed;
-                }
-                if (j > 0) {
-                    llama_kv_cell & seq = cache.cells[seq_id];
-                    if (seq.tail >= 0) {
-                        llama_kv_cell & cell = cache.cells[seq.tail];
-                        // clear cells from seq_ids that become shared
-                        // (should not normally happen, but let's handle it anyway)
-                        cell.seq_id.erase(seq_id);
-                        seq.tail = -1;
-                        if (cell.seq_id.empty()) {
-                            cell.pos = -1;
-                            cell.src = -1;
-                            cache.used -= 1;
-                        }
-                    }
-                }
-            }
-        }
-
-#ifndef NDEBUG
-        {
-            std::vector<int32_t> tails_verif;
-            tails_verif.assign(cache.size, -1);
-            for (uint32_t i = 0; i < cache.size; ++i) {
-                llama_kv_cell & cell = cache.cells[i];
-                for (llama_seq_id seq_id : cell.seq_id) {
-                    if (tails_verif[seq_id] != -1) {
-                        LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
-                    }
-                    tails_verif[seq_id] = i;
-                }
-            }
-            for (uint32_t i = 0; i < cache.size; ++i) {
-                if (tails_verif[i] != cache.cells[i].tail) {
-                    LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cache.cells[i].tail, tails_verif[i]);
-                }
-            }
-        }
-#endif
-
-        // find next empty cell
-        uint32_t next_empty_cell = cache.head;
-
-        for (uint32_t i = 0; i < cache.size; ++i) {
-            if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; }
-            llama_kv_cell & cell = cache.cells[next_empty_cell];
-            if (cell.is_empty()) { break; }
-            next_empty_cell += 1;
-        }
-
-        // find usable cell range
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = batch.seq_id[s][0];
-            llama_kv_cell & seq_meta = cache.cells[seq_id];
-            bool has_cell = false;
-            if (seq_meta.tail >= 0) {
-                llama_kv_cell & cell = cache.cells[seq_meta.tail];
-                GGML_ASSERT(cell.has_seq_id(seq_id));
-                // does this seq_id "own" the cell?
-                if (cell.seq_id.size() == 1) { has_cell = true; }
-            }
-            if (!has_cell) {
-                llama_kv_cell & empty_cell = cache.cells[next_empty_cell];
-                GGML_ASSERT(empty_cell.is_empty());
-                // copy old tail into the empty cell
-                if (seq_meta.tail >= 0) {
-                    llama_kv_cell & orig_cell = cache.cells[seq_meta.tail];
-                    empty_cell.pos = orig_cell.pos;
-                    empty_cell.src = orig_cell.src;
-                    orig_cell.seq_id.erase(seq_id);
-                    empty_cell.seq_id.insert(seq_id); // will be overwritten
-                }
-                seq_meta.tail = next_empty_cell;
-                // find next empty cell
-                if (s + 1 < n_seqs) {
-                    next_empty_cell += 1;
-                    for (uint32_t i = 0; i < cache.size; ++i) {
-                        if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; }
-                        llama_kv_cell & cell = cache.cells[next_empty_cell];
-                        if (cell.is_empty()) { break; }
-                        next_empty_cell += 1;
-                    }
-                }
-            }
-            if (min > seq_meta.tail) { min = seq_meta.tail; }
-            if (max < seq_meta.tail) { max = seq_meta.tail; }
-        }
-
-        // gather and re-order
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            int32_t dst_id = s + min;
-            int32_t src_id = cache.cells[batch.seq_id[s][0]].tail;
-            if (dst_id != src_id) {
-                llama_kv_cell & dst_cell = cache.cells[dst_id];
-                llama_kv_cell & src_cell = cache.cells[src_id];
-
-                std::swap(dst_cell.pos, src_cell.pos);
-                std::swap(dst_cell.src, src_cell.src);
-                std::swap(dst_cell.seq_id, src_cell.seq_id);
-
-                // swap tails (assuming they NEVER overlap)
-                for (const llama_seq_id seq_id : src_cell.seq_id) {
-                    cache.cells[seq_id].tail = src_id;
-                }
-                for (const llama_seq_id seq_id : dst_cell.seq_id) {
-                    cache.cells[seq_id].tail = dst_id;
-                }
-            }
-        }
-
-        // update the pos of the used seqs
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1];
-            int32_t cell_id = s + min;
-            llama_kv_cell & cell = cache.cells[cell_id];
-
-            if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
-                // What should happen when the pos backtracks or skips a value?
-                // Clearing the state mid-batch would require special-casing which isn't done.
-                LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
-                    __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens);
-            }
-            cell.pos = last_pos;
-            cell.seq_id.clear();
-            for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
-                cell.seq_id.insert(seq_id);
-                cache.cells[seq_id].tail = cell_id;
-            }
-        }
-
-        // allow getting the range of used cells, from head to head + n
-        cache.head = min;
-        cache.n    = max - min + 1;
-        cache.used = std::count_if(cache.cells.begin(), cache.cells.end(),
-            [](const llama_kv_cell& cell){ return !cell.is_empty(); });
-
-        // sanity check
-        return llama_kv_cache_slot_info(cache.n >= n_seqs);
-    }
-    // otherwise, one cell per token.
-
-    if (n_tokens > cache.size) {
-        LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size);
-        return llama_kv_cache_slot_info_failed;
-    }
-
-    uint32_t n_tested = 0;
-
-    while (true) {
-        if (cache.head + n_tokens > cache.size) {
-            n_tested += cache.size - cache.head;
-            cache.head = 0;
-            continue;
-        }
-
-        bool found = true;
-        for (uint32_t i = 0; i < n_tokens; i++) {
-            if (cache.cells[cache.head + i].pos >= 0) {
-                found = false;
-                cache.head += i + 1;
-                n_tested   += i + 1;
-                break;
-            }
-        }
-
-        if (found) {
-            break;
-        }
-
-        if (n_tested >= cache.size) {
-            //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
-            return llama_kv_cache_slot_info_failed;
-        }
-    }
-
-    for (uint32_t s = 0; s < n_seqs; s++) {
-        for (uint32_t i = 0; i < n_seq_tokens; ++i) {
-            uint32_t k = s*n_seq_tokens + i;
-            cache.cells[cache.head + k].pos = batch.pos[k];
-
-            for (int32_t j = 0; j < batch.n_seq_id[s]; j++) {
-                cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]);
-            }
-        }
-    }
-
-    cache.used += n_tokens;
-
-    return llama_kv_cache_slot_info(cache.head, cache.head + n_tokens);
-}
-
-// find how many cells are currently in use
-static uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
-    for (uint32_t i = cache.size; i > 0; --i) {
-        const llama_kv_cell & cell = cache.cells[i - 1];
-
-        if (cell.pos >= 0 && !cell.is_empty()) {
-            return i;
-        }
-    }
-
-    return 0;
-}
-
-static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
-    for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
-        cache.cells[i].pos = -1;
-        cache.cells[i].seq_id.clear();
-        cache.cells[i].src = -1;
-        cache.cells[i].tail = -1;
-    }
-    cache.head = 0;
-    cache.used = 0;
-
-    for (auto & buf : cache.bufs) {
-        ggml_backend_buffer_clear(buf.get(), 0);
-    }
-}
-
-static bool llama_kv_cache_seq_rm(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1) {
-    uint32_t new_head = cache.size;
-
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
-
-    // models like Mamba or RWKV can't have a state partially erased
-    if (cache.recurrent) {
-        if (seq_id >= (int64_t) cache.size) {
-            // could be fatal
-            return false;
-        }
-        if (0 <= seq_id) {
-            int32_t & tail_id = cache.cells[seq_id].tail;
-            if (tail_id >= 0) {
-                const llama_kv_cell & cell = cache.cells[tail_id];
-                // partial intersection is invalid
-                if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
-                    return false;
-                }
-                // invalidate tails which will be cleared
-                if (p0 <= cell.pos && cell.pos < p1) {
-                    tail_id = -1;
-                }
-            }
-        } else {
-            // seq_id is negative, then the range should include everything or nothing
-            if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
-                return false;
-            }
-        }
-    }
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            if (seq_id < 0) {
-                cache.cells[i].seq_id.clear();
-            } else if (cache.cells[i].has_seq_id(seq_id)) {
-                cache.cells[i].seq_id.erase(seq_id);
-            } else {
-                continue;
-            }
-            if (cache.cells[i].is_empty()) {
-                // keep count of the number of used cells
-                if (cache.cells[i].pos >= 0) cache.used--;
-
-                cache.cells[i].pos = -1;
-                cache.cells[i].src = -1;
-                if (new_head == cache.size) new_head = i;
-            }
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
-
-    return true;
-}
-
-static void llama_kv_cache_seq_cp(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id_src,
-                 llama_seq_id   seq_id_dst,
-                    llama_pos   p0,
-                    llama_pos   p1) {
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
-
-    if (cache.recurrent) {
-        if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) {
-            llama_kv_cell & tail_src = cache.cells[seq_id_src];
-            llama_kv_cell & tail_dst = cache.cells[seq_id_dst];
-            if (tail_dst.tail >= 0) {
-                // clear destination seq_id if it wasn't empty
-                llama_kv_cell & cell_dst = cache.cells[tail_dst.tail];
-
-                cell_dst.seq_id.erase(seq_id_dst);
-                tail_dst.tail = -1;
-                if (cell_dst.seq_id.empty()) {
-                    cell_dst.pos = -1;
-                    cell_dst.delta = -1;
-                    cell_dst.src = -1;
-                    cache.used -= 1;
-                }
-            }
-            if (tail_src.tail >= 0) {
-                llama_kv_cell & cell_src = cache.cells[tail_src.tail];
-
-                cell_src.seq_id.insert(seq_id_dst);
-                tail_dst.tail = tail_src.tail;
-            }
-        }
-
-        return;
-    }
-    // otherwise, this is the KV cache of a Transformer-like model
-
-    cache.head = 0;
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            cache.cells[i].seq_id.insert(seq_id_dst);
-        }
-    }
-}
-
-static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
-    uint32_t new_head = cache.size;
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.recurrent && (llama_seq_id) i != seq_id) {
-            cache.cells[i].tail = -1;
-        }
-        if (!cache.cells[i].has_seq_id(seq_id)) {
-            if (cache.cells[i].pos >= 0) cache.used--;
-            cache.cells[i].pos = -1;
-            cache.cells[i].src = -1;
-            cache.cells[i].seq_id.clear();
-            if (new_head == cache.size) new_head = i;
-        } else {
-            cache.cells[i].seq_id.clear();
-            cache.cells[i].seq_id.insert(seq_id);
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
-}
-
-static void llama_kv_cache_seq_add(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1,
-                    llama_pos   delta) {
-    uint32_t new_head = cache.size;
-
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
-    // If there is no range then return early to avoid looping over the cache.
-    if (p0 == p1) return;
-
-    if (cache.recurrent) {
-        // for Mamba-like or RWKV models, only the pos needs to be shifted
-        if (0 <= seq_id && seq_id < (int64_t) cache.size) {
-            const int32_t tail_id = cache.cells[seq_id].tail;
-            if (tail_id >= 0) {
-                llama_kv_cell & cell = cache.cells[tail_id];
-                if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
-                    cell.pos += delta;
-                }
-            }
-        }
-        return;
-    }
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            cache.has_shift = true;
-            cache.cells[i].pos   += delta;
-            cache.cells[i].delta += delta;
-
-            if (cache.cells[i].pos < 0) {
-                if (!cache.cells[i].is_empty()) {
-                    cache.used--;
-                }
-                cache.cells[i].pos = -1;
-                cache.cells[i].seq_id.clear();
-                if (new_head == cache.size) {
-                    new_head = i;
-                }
-            }
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    // Otherwise we just start the next search from the beginning.
-    cache.head = new_head != cache.size ? new_head : 0;
-}
-
-static void llama_kv_cache_seq_div(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1,
-                          int   d) {
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
-    // If there is no range then return early to avoid looping over the cache.
-    if (p0 == p1) return;
-
-    if (cache.recurrent) {
-        // for Mamba-like or RWKV models, only the pos needs to be changed
-        if (0 <= seq_id && seq_id < (int64_t) cache.size) {
-            const int32_t tail_id = cache.cells[seq_id].tail;
-            if (tail_id >= 0) {
-                llama_kv_cell & cell = cache.cells[tail_id];
-                if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
-                    cell.pos /= d;
-                }
-            }
-        }
-        return;
-    }
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            cache.has_shift = true;
-
-            {
-                llama_pos p_old = cache.cells[i].pos;
-                cache.cells[i].pos   /= d;
-                cache.cells[i].delta += cache.cells[i].pos - p_old;
-            }
-        }
-    }
-}
-
-static llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) {
-    llama_pos result = 0;
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id)) {
-            result = std::max(result, cache.cells[i].pos);
-        }
-    }
-
-    return result;
-}
-
-static void llama_kv_cache_defrag(struct llama_kv_cache & cache) {
-    if (!cache.recurrent) {
-        cache.do_defrag = true;
-    }
-}
-
-static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) {
-    // the FA kernels require padding to avoid extra runtime boundary checks
-    return cparams.flash_attn ? 256u : 32u;
-}
-
-// saves the kv_cache state for future recovery.
-// used to rollback llama_kv_cache_find_slot changes.
-struct llama_kv_slot_restorer {
-    struct llama_kv_cache_state {
-        uint32_t head = 0;
-        uint32_t n    = 0;
-    } old_state;
-
-    // for non-recurrent models only
-    // list of slots to restore
-    std::vector<std::pair<uint32_t, uint32_t>> slot_boundaries;
-
-    bool do_restore = false;
-
-    explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) {
-        old_state.head  = cache.head;
-        old_state.n     = cache.n;
-    }
-
-    // saves a slot information for future restoration
-    void save(const struct llama_kv_cache_slot_info & slot) {
-        if (slot) {
-            do_restore = true;
-            if (slot.boundaries.first != slot.boundaries.second) {
-                slot_boundaries.push_back(slot.boundaries);
-            }
-        }
-    }
-
-    // must be explicitly called to restore the kv_cache state
-    // and rollback changes from all llama_kv_cache_find_slot calls
-    void restore(struct llama_kv_cache & cache) {
-        if (do_restore) {
-            cache.head  = old_state.head;
-            cache.n     = old_state.n;
-
-            if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
-                llama_kv_cache_seq_rm(cache, -1, -1, -1);
-            } else {
-                for (auto & slot : slot_boundaries) {
-                    llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second);
-                }
-            }
-        }
-    }
-};
-
-//
-// model loading and saving
-//
-
-enum llama_fver {
-    GGUF_FILE_VERSION_V1 = 1,
-    GGUF_FILE_VERSION_V2 = 2,
-    GGUF_FILE_VERSION_V3 = 3,
-};
-
-static const char * llama_file_version_name(llama_fver version) {
-    switch (version) {
-        case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
-        case GGUF_FILE_VERSION_V2: return "GGUF V2";
-        case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
-    }
-
-    return "unknown";
-}
-
-static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
-    char buf[256];
-    snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
-    for (size_t i = 1; i < ne.size(); i++) {
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
-    }
-    return buf;
-}
-
-static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
-    char buf[256];
-    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
-    for (int i = 1; i < GGML_MAX_DIMS; i++) {
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
-    }
-    return buf;
-}
-
-namespace GGUFMeta {
-    template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
-    struct GKV_Base_Type {
-        static constexpr gguf_type gt = gt_;
-
-        static T getter(const gguf_context * ctx, const int kid) {
-            return gfun(ctx, kid);
-        }
-    };
-
-    template<typename T> struct GKV_Base;
-
-    template<> struct GKV_Base<bool        >: GKV_Base_Type<bool,         GGUF_TYPE_BOOL,    gguf_get_val_bool> {};
-    template<> struct GKV_Base<uint8_t     >: GKV_Base_Type<uint8_t,      GGUF_TYPE_UINT8,   gguf_get_val_u8  > {};
-    template<> struct GKV_Base<uint16_t    >: GKV_Base_Type<uint16_t,     GGUF_TYPE_UINT16,  gguf_get_val_u16 > {};
-    template<> struct GKV_Base<uint32_t    >: GKV_Base_Type<uint32_t,     GGUF_TYPE_UINT32,  gguf_get_val_u32 > {};
-    template<> struct GKV_Base<uint64_t    >: GKV_Base_Type<uint64_t,     GGUF_TYPE_UINT64,  gguf_get_val_u64 > {};
-    template<> struct GKV_Base<int8_t      >: GKV_Base_Type<int8_t,       GGUF_TYPE_INT8,    gguf_get_val_i8  > {};
-    template<> struct GKV_Base<int16_t     >: GKV_Base_Type<int16_t,      GGUF_TYPE_INT16,   gguf_get_val_i16 > {};
-    template<> struct GKV_Base<int32_t     >: GKV_Base_Type<int32_t,      GGUF_TYPE_INT32,   gguf_get_val_i32 > {};
-    template<> struct GKV_Base<int64_t     >: GKV_Base_Type<int64_t,      GGUF_TYPE_INT64,   gguf_get_val_i64 > {};
-    template<> struct GKV_Base<float       >: GKV_Base_Type<float,        GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
-    template<> struct GKV_Base<double      >: GKV_Base_Type<double,       GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
-    template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING,  gguf_get_val_str > {};
-
-    template<> struct GKV_Base<std::string> {
-        static constexpr gguf_type gt = GGUF_TYPE_STRING;
-
-        static std::string getter(const gguf_context * ctx, const int kid) {
-            return gguf_get_val_str(ctx, kid);
-        }
-    };
-
-    struct ArrayInfo {
-        const gguf_type gt;
-        const size_t length;
-        const void * data;
-    };
-
-    template<> struct GKV_Base<ArrayInfo> {
-        public:
-        static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
-        static ArrayInfo getter(const gguf_context *ctx, const int k) {
-            return ArrayInfo {
-                gguf_get_arr_type(ctx, k),
-                size_t(gguf_get_arr_n(ctx, k)),
-                gguf_get_arr_data(ctx, k),
-            };
-        }
-    };
-
-    template<typename T>
-    class GKV : public GKV_Base<T> {
-        GKV() = delete;
-
-        public:
-        static T get_kv(const gguf_context * ctx, const int k) {
-            const enum gguf_type kt = gguf_get_kv_type(ctx, k);
-
-            if (kt != GKV::gt) {
-                throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
-                    gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
-            }
-            return GKV::getter(ctx, k);
-        }
-
-        static const char * override_type_to_str(const llama_model_kv_override_type ty) {
-            switch (ty) {
-                case LLAMA_KV_OVERRIDE_TYPE_BOOL:  return "bool";
-                case LLAMA_KV_OVERRIDE_TYPE_INT:   return "int";
-                case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
-                case LLAMA_KV_OVERRIDE_TYPE_STR:   return "str";
-            }
-            return "unknown";
-        }
-
-        static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
-            if (!ovrd) { return false; }
-            if (ovrd->tag == expected_type) {
-                LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
-                    __func__, override_type_to_str(ovrd->tag), ovrd->key);
-                switch (ovrd->tag) {
-                    case LLAMA_KV_OVERRIDE_TYPE_BOOL:  {
-                        LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
-                    } break;
-                    case LLAMA_KV_OVERRIDE_TYPE_INT:   {
-                        LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
-                    } break;
-                    case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
-                        LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
-                    } break;
-                    case LLAMA_KV_OVERRIDE_TYPE_STR: {
-                        LLAMA_LOG_INFO("%s\n", ovrd->val_str);
-                    } break;
-                    default:
-                        // Shouldn't be possible to end up here, but just in case...
-                        throw std::runtime_error(
-                            format("Unsupported attempt to override %s type for metadata key %s\n",
-                                override_type_to_str(ovrd->tag), ovrd->key));
-                }
-                return true;
-            }
-            LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
-                __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
-            return false;
-        }
-
-        template<typename OT>
-        static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
-        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
-                target = ovrd->val_bool;
-                return true;
-            }
-            return false;
-        }
-
-        template<typename OT>
-        static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
-        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
-                target = ovrd->val_i64;
-                return true;
-            }
-            return false;
-        }
-
-        template<typename OT>
-        static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
-        try_override(T & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
-                target = ovrd->val_f64;
-                return true;
-            }
-            return false;
-        }
-
-        template<typename OT>
-        static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
-        try_override(T & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
-                target = ovrd->val_str;
-                return true;
-            }
-            return false;
-        }
-
-        static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
-            if (try_override(target, ovrd)) {
-                return true;
-            }
-            if (k < 0) { return false; }
-            target = get_kv(ctx, k);
-            return true;
-        }
-
-        static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
-            return set(ctx, gguf_find_key(ctx, key), target, ovrd);
-        }
-
-        static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
-            return set(ctx, key.c_str(), target, ovrd);
-        }
-    };
-}
-
-using llama_buf_map = std::unordered_map<uint32_t, ggml_backend_buffer_t>;
-
-static size_t llama_model_max_nodes(const llama_model & model) {
-    return std::max<size_t>(8192, model.tensors_by_name.size()*5);
-}
-
-struct llama_model_loader {
-    int n_kv      = 0;
-    int n_tensors = 0;
-    int n_created = 0;
-
-    uint64_t n_elements = 0;
-    size_t  n_bytes     = 0;
-
-    bool use_mmap = false;
-    bool check_tensors;
-
-    llama_files files;
-    llama_ftype ftype;
-    llama_fver  fver;
-
-    llama_mmaps mappings;
-
-    // Holds information on a model weight
-    struct llama_tensor_weight {
-        uint16_t  idx; // source file index
-        size_t   offs; // tensor data offset in the original file
-
-        ggml_tensor * tensor;
-
-        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
-            const int tensor_idx = gguf_find_tensor(gguf_ctx,  ggml_get_name(tensor));
-            if (tensor_idx < 0) {
-                throw std::runtime_error(format("tensor '%s' not found in the model", ggml_get_name(tensor)));
-            }
-
-            offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
-            if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
-                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", ggml_get_name(tensor)));
-            }
-        }
-    };
-
-    // custom comparator to sort weights more nicely by layer
-    struct weight_name_comparer {
-        bool operator()(const std::string & a, const std::string & b) const {
-            int a_layer = -1;
-            int b_layer = -1;
-            sscanf(a.c_str(), "blk.%d.", &a_layer);
-            sscanf(b.c_str(), "blk.%d.", &b_layer);
-            if (a_layer != b_layer) {
-                return a_layer < b_layer;
-            }
-            return a < b;
-        }
-    };
-
-    std::map<std::string, struct llama_tensor_weight, weight_name_comparer> weights_map;
-    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
-
-    gguf_context_ptr meta;
-    std::vector<ggml_context_ptr> contexts;
-
-    std::string arch_name;
-    LLM_KV      llm_kv    = LLM_KV(LLM_ARCH_UNKNOWN);
-
-    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
-        int trace = 0;
-        if (getenv("LLAMA_TRACE")) {
-            trace = atoi(getenv("LLAMA_TRACE"));
-        }
-
-        if (param_overrides_p != nullptr) {
-            for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
-                kv_overrides.insert({std::string(p->key), *p});
-            }
-        }
-
-        struct ggml_context * ctx = NULL;
-        struct gguf_init_params params = {
-            /*.no_alloc = */ true,
-            /*.ctx      = */ &ctx,
-        };
-
-        meta.reset(gguf_init_from_file(fname.c_str(), params));
-        if (!meta) {
-            throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
-        }
-
-        get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
-        llm_kv = LLM_KV(llm_arch_from_string(arch_name));
-
-        files.emplace_back(new llama_file(fname.c_str(), "rb"));
-        contexts.emplace_back(ctx);
-
-        // Save tensors data offset of the main file.
-        // For subsidiary files, `meta` tensor data offset must not be used,
-        // so we build a unified tensors index for weights.
-        for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
-            std::string tensor_name = std::string(cur->name);
-            // make sure there are no duplicated tensor names
-            if (weights_map.find(tensor_name) != weights_map.end()) {
-                throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur)));
-            }
-            n_elements += ggml_nelements(cur);
-            n_bytes    += ggml_nbytes(cur);
-            weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur));
-        }
-        uint16_t n_split = 0;
-        get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
-
-        // Load additional GGML contexts
-        if (n_split > 1) {
-            uint16_t idx = 0;
-            get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
-            if (idx != 0) {
-                throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
-            }
-
-            char split_prefix[PATH_MAX] = {0};
-            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), fname.c_str(), idx, n_split)) {
-                throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
-            }
-
-            if (trace > 0) {
-                LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
-            }
-
-            char split_path[PATH_MAX] = {0};
-            for (idx = 1; idx < n_split; idx++) {
-                llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);
-
-                struct gguf_init_params split_params = {
-                    /*.no_alloc = */ true,
-                    /*.ctx      = */ &ctx,
-                };
-                gguf_context_ptr ctx_gguf { gguf_init_from_file(split_path, split_params) };
-                if (!ctx_gguf) {
-                    throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path));
-                }
-
-                files.emplace_back(new llama_file(split_path, "rb"));
-                contexts.emplace_back(ctx);
-
-                // Save tensors data offset info of the shard.
-                for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
-                    std::string tensor_name = std::string(cur->name);
-                    // make sure there are no duplicated tensor names
-                    if (weights_map.find(tensor_name) != weights_map.end()) {
-                        throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur)));
-                    }
-                    n_elements += ggml_nelements(cur);
-                    n_bytes    += ggml_nbytes(cur);
-                    weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur));
-                }
-            }
-
-            get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
-
-            // sanity check
-            {
-                const int n_tensors_loaded = (int) weights_map.size();
-                if (n_tensors != n_tensors_loaded) {
-                    throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
-                }
-            }
-
-            LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n",  __func__, n_split - 1);
-        }
-
-        n_kv      = gguf_get_n_kv(meta.get());
-        n_tensors = weights_map.size();
-
-        fver = (enum llama_fver) gguf_get_version(meta.get());
-
-        LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
-                __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
-
-        // determine file type based on the number of tensors for each quantization and print meta data
-        // TODO: make optional
-        {
-            std::map<enum ggml_type, uint32_t> n_type;
-
-            uint32_t n_type_max = 0;
-            enum ggml_type type_max = GGML_TYPE_F32;
-
-            for (const auto & it : weights_map) {
-                const llama_tensor_weight & w = it.second;
-                const ggml_tensor * tensor = w.tensor;
-
-                enum ggml_type type = tensor->type;
-
-                n_type[type]++;
-
-                if (n_type_max < n_type[type]) {
-                    n_type_max = n_type[type];
-                    type_max   = type;
-                }
-
-                if (trace > 0) {
-                    const uint16_t sid = w.idx;
-                    LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
-                }
-            }
-
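-            // guess the overall file type from the most common tensor type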
-            switch (type_max) {
-                case GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
-                case GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
-                case GGML_TYPE_BF16:    ftype = LLAMA_FTYPE_MOSTLY_BF16;    break;
-                case GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
-                case GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
-                case GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
-                case GGML_TYPE_Q5_1:    ftype = LLAMA_FTYPE_MOSTLY_Q5_1;    break;
-                case GGML_TYPE_Q8_0:    ftype = LLAMA_FTYPE_MOSTLY_Q8_0;    break;
-                case GGML_TYPE_Q2_K:    ftype = LLAMA_FTYPE_MOSTLY_Q2_K;    break;
-                case GGML_TYPE_Q3_K:    ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M;  break;
-                case GGML_TYPE_Q4_K:    ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;  break;
-                case GGML_TYPE_Q5_K:    ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M;  break;
-                case GGML_TYPE_Q6_K:    ftype = LLAMA_FTYPE_MOSTLY_Q6_K;    break;
-                case GGML_TYPE_TQ1_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ1_0;   break;
-                case GGML_TYPE_TQ2_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ2_0;   break;
-                case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
-                case GGML_TYPE_IQ2_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS;  break;
-                case GGML_TYPE_IQ2_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ2_S;   break;
-                case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
-                case GGML_TYPE_IQ1_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_S;   break;
-                case GGML_TYPE_IQ1_M:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_M;   break;
-                case GGML_TYPE_IQ4_NL:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL;  break;
-                case GGML_TYPE_IQ4_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS;  break;
-                case GGML_TYPE_IQ3_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ3_S;   break;
-                default:
-                    {
-                        LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
-                        ftype = LLAMA_FTYPE_ALL_F32;
-                    } break;
-            }
-
-            // this is a way to mark that we have "guessed" the file type
-            ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
-
-            {
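-                // prefer an explicit general.file_type value over the guess, when present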
-                const int kid = gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV
-                if (kid >= 0) {
-                    ftype = (llama_ftype) gguf_get_val_u32(meta.get(), kid);
-                }
-            }
-
-            LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
-
-            for (int i = 0; i < n_kv; i++) {
-                const char * name           = gguf_get_key(meta.get(), i);
-                const enum gguf_type type   = gguf_get_kv_type(meta.get(), i);
-                const std::string type_name =
-                    type == GGUF_TYPE_ARRAY
-                    ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
-                    : gguf_type_name(type);
-
-                std::string value          = gguf_kv_to_str(meta.get(), i);
-                const size_t MAX_VALUE_LEN = 40;
-                if (value.size() > MAX_VALUE_LEN) {
-                    value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
-                }
-                replace_all(value, "\n", "\\n");
-
-                LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
-            }
-
-            // print type counts
-            for (auto & kv : n_type) {
-                if (kv.second == 0) {
-                    continue;
-                }
-
-                LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
-            }
-        }
-
-        if (!llama_mmap::SUPPORTED) {
-            LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
-            use_mmap = false;
-        }
-
-        this->use_mmap = use_mmap;
-        this->check_tensors = check_tensors;
-    }
-
-    template<typename T>
-    typename std::enable_if<std::is_integral<T>::value, bool>::type
-    get_arr_n(const std::string & key, T & result, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0) {
-            if (required) {
-                throw std::runtime_error(format("key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
-
-
-        result = arr_info.length;
-        return true;
-    }
-
-    template<typename T>
-    typename std::enable_if<std::is_integral<T>::value, bool>::type
-    get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
-        return get_arr_n(llm_kv(kid), result, required);
-    }
-
-    template<typename T>
-    bool get_arr(const std::string & key, std::vector<T> & result, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
-            if (required) {
-                throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
-
-        switch (arr_info.gt) {
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
-            case GGUF_TYPE_INT32:   GGML_ASSERT(
-                                            (std::is_same<T,  int32_t>::value) ||
-                                            (std::is_same<T, uint32_t>::value));  break;
-            default:
-                throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
-        }
-
-        result.resize(arr_info.length);
-        result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
-
-        return true;
-    }
-
-    template<typename T, size_t N_MAX>
-    bool get_arr(const std::string & key, std::array<T, N_MAX> & result, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
-            if (required) {
-                throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
-
-        switch (arr_info.gt) {
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
-            case GGUF_TYPE_INT32:   GGML_ASSERT(
-                                            (std::is_same<T,  int32_t>::value) ||
-                                            (std::is_same<T, uint32_t>::value));  break;
-            default:
-                throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
-        }
-
-        if (arr_info.length > N_MAX) {
-            throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
-        }
-
-        std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
-
-        return true;
-    }
-
-    template<typename T>
-    bool get_arr(const enum llm_kv kid, T & result, const bool required = true) {
-        return get_arr(llm_kv(kid), result, required);
-    }
-
-    template<typename T>
-    bool get_key(const std::string & key, T & result, const bool required = true) {
-        auto it = kv_overrides.find(key);
-
-        const struct llama_model_kv_override * override =
-            it != kv_overrides.end() ? &it->second : nullptr;
-
-        const bool found = GGUFMeta::GKV<T>::set(meta.get(), key, result, override);
-
-        if (required && !found) {
-            throw std::runtime_error(format("key not found in model: %s", key.c_str()));
-        }
-
-        return found;
-    }
-
-    template<typename T>
-    bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
-        return get_key(llm_kv(kid), result, required);
-    }
-
-    // get array of n <= N_MAX elements, or a single element repeated n times
-    template<typename T, size_t N_MAX>
-    bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0) {
-            if (required) {
-                throw std::runtime_error(format("key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        if (n > N_MAX) {
-            throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str()));
-        }
-
-        if (gguf_get_kv_type(meta.get(), kid) == GGUF_TYPE_ARRAY) {
-            struct GGUFMeta::ArrayInfo arr_info =
-                GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
-
-            if (n != arr_info.length) {
-                throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length));
-            }
-
-            return get_arr(key, result, required);
-        } else {
-            T value;
-
-            bool ok = get_key(key, value, required);
-            if (!ok) {
-                return false;
-            }
-
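-            // scalar case: broadcast the single value across all n entries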
-            for (uint32_t i = 0; i < n; i++) {
-                result[i] = value;
-            }
-
-            return true;
-        }
-    }
-
-    template<typename T>
-    bool get_key_or_arr(const enum llm_kv kid, T & result, uint32_t n, const bool required = true) {
-        return get_key_or_arr(llm_kv(kid), result, n, required);
-    }
-
-    std::string get_arch_name() const {
-        return arch_name;
-    }
-
-    enum llm_arch get_arch() const {
-        return llm_kv.arch;
-    }
-
-    const llama_tensor_weight * get_weight(const char * name) const {
-        auto pos = weights_map.find(name);
-        if (pos != weights_map.end()) {
-            return &pos->second;
-        }
-
-        return nullptr;
-    }
-
-    const llama_tensor_weight & require_weight(const char * name) const {
-        const llama_tensor_weight * weight = get_weight(name);
-        if (!weight) {
-            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
-        }
-        return *weight;
-    }
-
-    struct ggml_tensor * get_tensor_meta(const char * name) const {
-        const auto * weight = get_weight(name);
-        if (!weight) {
-            return nullptr;
-        }
-        return weight->tensor;
-    }
-
-    struct ggml_tensor * require_tensor_meta(const std::string & name) const {
-        struct ggml_tensor * tensor = get_tensor_meta(name.c_str());
-        if (!tensor) {
-            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
-        }
-        return tensor;
-    }
-
-    const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
-        const struct ggml_tensor * cur = get_tensor_meta(name.c_str());
-
-        if (cur == NULL) {
-            if (!required) {
-                return NULL;
-            }
-            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
-        }
-
-        {
-            bool is_ok = true;
-            for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
-                if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
-                    is_ok = false;
-                    break;
-                }
-            }
-            if (!is_ok) {
-                throw std::runtime_error(
-                        format("%s: tensor '%s' has wrong shape; expected %s, got %s",
-                            __func__, name.c_str(),
-                            llama_format_tensor_shape(ne).c_str(),
-                            llama_format_tensor_shape(cur).c_str()));
-            }
-        }
-
-        return cur;
-    }
-
-    static const int TENSOR_NOT_REQUIRED = 1;
-    static const int TENSOR_DUPLICATED   = 2;
-
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0) {
-        const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
-
-        if (cur == NULL) {
-            return NULL;
-        }
-
-        bool duplicated = flags & TENSOR_DUPLICATED;
-
-        struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur);
-        ggml_set_name(tensor, ggml_get_name(cur));
-
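-        // duplicated tensors add to the expected data size instead of the created-tensor count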
-        if (duplicated) {
-            size_data += ggml_nbytes(cur);
-        } else {
-            n_created++;
-        }
-
-        return tensor;
-
-    }
-
-    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true) {
-        const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);
-
-        if (cur == NULL) {
-            return NULL;
-        }
-
-        if (cur->type != base->type) {
-            throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type)));
-        }
-
-        std::array<int64_t, GGML_MAX_DIMS> dims;
-        for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
-            dims[i] = i < ne.size() ? ne.begin()[i] : 1;
-        }
-
-        struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
-                                        dims[0], dims[1], dims[2], dims[3],
-                                        cur->nb[1], cur->nb[2], cur->nb[3],
-                                        offset);
-
-        ggml_set_name(tensor, name.c_str());
-
-        n_created++;
-
-        return tensor;
-    }
-
-    void done_getting_tensors() const {
-        if (n_created != n_tensors) {
-            throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
-        }
-    }
-
-    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr) {
-        if (use_mmap) {
-            mappings.reserve(files.size());
-            mmaps_used.reserve(files.size());
-            for (const auto & file : files) {
-                auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
-                auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
-                std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn()));
-                mmaps_used.emplace_back(mapping->size, 0);
-                if (mlock_mmaps) {
-                    std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
-                    mlock_mmap->init(mapping->addr);
-                    mlock_mmaps->emplace_back(std::move(mlock_mmap));
-                }
-                mappings.emplace_back(std::move(mapping));
-            }
-        }
-
-        // compute the total size of all tensors for progress reporting
-        for (const auto & it : weights_map) {
-            size_data += ggml_nbytes(it.second.tensor);
-        }
-    }
-
-    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const {
-        GGML_ASSERT(!mappings.empty());
-        const auto & mapping = mappings.at(idx);
-
-        *first = mapping->size;
-        *last  = 0;
-        *addr = mapping->addr;
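-        // narrow [*first, *last) to the byte range covered by the tensors of ctx stored in mapping idx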
-        for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
-            const auto * weight = get_weight(ggml_get_name(tensor));
-            if (!weight || weight->idx != idx) {
-                continue;
-            }
-            *first = std::min(*first, weight->offs);
-            *last  = std::max(*last,  weight->offs + ggml_nbytes(tensor));
-        }
-    }
-
-    // for backwards compatibility, does not support ggml-backend
-    void load_data_for(struct ggml_tensor * cur) const {
-        const auto & w = require_weight(ggml_get_name(cur));
-
-        if (use_mmap) {
-            const auto & mapping = mappings.at(w.idx);
-            if (cur->data == nullptr) {
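-                // tensor not allocated yet: point it directly at the mapped file data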
-                cur->data = (uint8_t *)mapping->addr + w.offs;
-            } else {
-                memcpy(cur->data, (uint8_t *)mapping->addr + w.offs, ggml_nbytes(cur));
-            }
-        } else {
-            GGML_ASSERT(cur->data != nullptr);
-            GGML_ASSERT(w.idx < files.size());
-            const auto & file = files.at(w.idx);
-            file->seek(w.offs, SEEK_SET);
-            file->read_raw(cur->data, ggml_nbytes(cur));
-        }
-
-        if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) {
-            throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
-        }
-    }
-
-    size_t size_done = 0;
-    size_t size_data = 0;
-    std::vector<std::pair<size_t, size_t>> mmaps_used;
-
-    // Returns false if cancelled by progress_callback
-    bool load_all_data(
-            struct ggml_context * ctx,
-            llama_buf_map & bufs,
-            llama_mlocks * lmlocks,
-            llama_progress_callback progress_callback,
-            void * progress_callback_user_data) {
-        GGML_ASSERT(size_data != 0 && "call init_mappings() first");
-
-        std::vector<no_init<uint8_t>> read_buf;
-        std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;
-
-        // 4 staging buffers for async uploads, each 1 MB in size; this seems to be a good default for single NVMe drives.
-        // NVMe raid configurations might require more / larger buffers.
-        constexpr size_t n_buffers = 4;
-        constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
-
-        std::vector<ggml_backend_buffer_t> host_buffers;
-        std::vector<ggml_backend_event_t> events;
-        std::vector<void *> host_ptrs;
-        size_t buffer_idx = 0; // buffer to use for async loads
-        ggml_backend_t upload_backend = [&](const char * func) -> ggml_backend_t {
-            if (use_mmap || check_tensors) {
-                return nullptr;
-            }
-            // When not using mmapped I/O, use async uploads from pinned memory to GPU memory.
-            // First determine if the backend supports the necessary features for async uploads.
-            auto * buf = bufs.count(0) ? bufs.at(0) : nullptr;
-            if (!buf) {
-                LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func);
-                return nullptr;
-            }
-
-            auto * buft = ggml_backend_buffer_get_type(buf);
-            auto * dev = ggml_backend_buft_get_device(buft);
-            if (!dev) {
-                LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
-                    ggml_backend_buft_name(buft));
-                return nullptr;
-            }
-
-            if (buft != ggml_backend_dev_buffer_type(dev)) {
-                LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
-                    ggml_backend_buft_name(buft), ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            ggml_backend_dev_props props;
-            ggml_backend_dev_get_props(dev, &props);
-            if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) {
-                LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
-                    ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            auto * host_buft = ggml_backend_dev_host_buffer_type(dev);
-            if (!host_buft) {
-                LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func,
-                    ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            // If the backend is supported, create pinned memory buffers and events for synchronisation.
-            for (size_t idx = 0; idx < n_buffers; ++idx) {
-                auto * buf = ggml_backend_buft_alloc_buffer(host_buft, buffer_size);
-                if (!buf) {
-                    LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func,
-                        ggml_backend_dev_name(dev));
-                    return nullptr;
-                }
-
-                host_buffers.emplace_back(buf);
-                host_ptrs.emplace_back(ggml_backend_buffer_get_base(buf));
-
-                auto * event = ggml_backend_event_new(dev);
-                if (!event) {
-                    LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func,
-                        ggml_backend_dev_name(dev));
-                    return nullptr;
-                }
-
-                events.emplace_back(event);
-            }
-
-            ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
-            if (!backend) {
-                LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func,
-                    ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            return backend;
-        }(__func__);
-
-        if (upload_backend) {
-            LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__,
-                ggml_backend_dev_name(ggml_backend_get_device(upload_backend)),
-                ggml_backend_buft_name(ggml_backend_buffer_get_type(bufs.at(0))),
-                ggml_backend_name(upload_backend));
-        }
-
-        for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
-            const auto * weight = get_weight(ggml_get_name(cur));
-            if (weight == nullptr) {
-                // this can happen with split experts models
-                continue;
-            }
-
-            if (progress_callback) {
-                if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
-                    return false;
-                }
-            }
-
-            size_t n_size = ggml_nbytes(cur);
-
-            if (use_mmap) {
-                const auto & mapping = mappings.at(weight->idx);
-                ggml_backend_buffer_t buf_mmap = nullptr;
-                if (bufs.count(weight->idx)) {
-                    buf_mmap = bufs.at(weight->idx);
-                }
-                uint8_t * data = (uint8_t *) mapping->addr + weight->offs;
-
-                if (check_tensors) {
-                    validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
-                        return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
-                    }));
-                }
-
-                GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
-                if (buf_mmap && cur->data == nullptr) {
-                    ggml_backend_tensor_alloc(buf_mmap, cur, data);
-                    if (lmlocks) {
-                        const auto & lmlock = lmlocks->at(weight->idx);
-                        lmlock->grow_to(weight->offs + n_size);
-                    }
-
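-                    // track the used byte range of the mapping so unused fragments can be unmapped later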
-                    auto & mmap_used = mmaps_used[weight->idx];
-                    mmap_used.first  = std::min(mmap_used.first,  weight->offs);
-                    mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
-                } else {
-                    ggml_backend_tensor_set(cur, data, 0, n_size);
-                }
-            } else {
-                const auto & file = files.at(weight->idx);
-                if (ggml_backend_buffer_is_host(cur->buffer)) {
-                    file->seek(weight->offs, SEEK_SET);
-                    file->read_raw(cur->data, n_size);
-                    if (check_tensors) {
-                        validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
-                            return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size));
-                        }));
-                    }
-                } else {
-                    // If upload_backend is valid, load the tensor in chunks into pinned memory and upload the buffers asynchronously to the GPU.
-                    if (upload_backend) {
-                        file->seek(weight->offs, SEEK_SET);
-
-                        size_t bytes_read = 0;
-
-                        while (bytes_read < n_size) {
-                            size_t read_iteration = std::min(buffer_size, n_size - bytes_read);
-
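-                            // wait for the previous upload from this staging buffer to finish before reusing it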
-                            ggml_backend_event_synchronize(events[buffer_idx]);
-                            file->read_raw(host_ptrs[buffer_idx], read_iteration);
-                            ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
-                            ggml_backend_event_record(events[buffer_idx], upload_backend);
-
-                            bytes_read += read_iteration;
-                            ++buffer_idx;
-                            buffer_idx %= n_buffers;
-                        }
-                    } else {
-                        read_buf.resize(n_size);
-                        file->seek(weight->offs, SEEK_SET);
-                        file->read_raw(read_buf.data(), n_size);
-                        ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
-                        if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
-                            throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
-                        }
-                    }
-                }
-            }
-
-            size_done += n_size;
-        }
-
-        // free temporary resources used for async uploads
-        for (auto * event : events) {
-            ggml_backend_event_synchronize(event);
-            ggml_backend_event_free(event);
-        }
-        for (auto * buf : host_buffers) {
-            ggml_backend_buffer_free(buf);
-        }
-        ggml_backend_free(upload_backend);
-
-        // check validation results
-        bool validation_failed = false;
-        for (auto & future : validation_result) {
-            auto result = future.get();
-            if (!result.second) {
-                LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first));
-                validation_failed = true;
-            }
-        }
-        if (validation_failed) {
-            throw std::runtime_error("found tensors with invalid data");
-        }
-
-        // check if this is the last call and do final cleanup
-        if (size_done >= size_data) {
-            // unmap offloaded tensors and metadata
-            if (use_mmap) {
-                for (uint32_t idx = 0; idx < mappings.size(); idx++) {
-                    const auto & mmap_used = mmaps_used.at(idx);
-                    auto & mapping = mappings.at(idx);
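-                    // unmap the fragments before the first and after the last byte actually used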
-                    mapping->unmap_fragment(0, mmap_used.first);
-                    if (mmap_used.second != 0) {
-                        mapping->unmap_fragment(mmap_used.second, mapping->size);
-                    }
-                }
-            }
-            if (progress_callback) {
-                // Even though the model is done loading, we still honor
-                // cancellation since we need to free allocations.
-                return progress_callback(1.0f, progress_callback_user_data);
-            }
-        }
-
-        return true;
-    }
-};
-
-// temporary allocate memory for the input batch if needed
-static const llama_seq_id batch_default_seq_id = 0;
-struct llama_batch_allocr {
-    std::array<llama_seq_id, 1> seq_id_0 = {batch_default_seq_id};
-    std::vector<llama_pos>      pos;
-    std::vector<int32_t>        n_seq_id;
-    std::vector<llama_seq_id *> seq_id;
-    std::vector<int8_t>         logits;
-    struct llama_batch          batch;
-    // optionally fill in the missing fields of the batch returned by llama_batch_get_one
-    llama_batch_allocr(llama_context & ctx, struct llama_batch in_batch) {
-        batch = in_batch;
-        GGML_ASSERT(batch.n_tokens > 0);
-        if (!batch.pos) {
-            // determine the last position in KV cache
-            llama_pos last_pos = -1;
-            for (const auto & cell : ctx.kv_self.cells) {
-                if (cell.has_seq_id(batch_default_seq_id)) {
-                    last_pos = std::max(last_pos, cell.pos);
-                }
-            }
-            last_pos++; // next position
-            pos.resize(batch.n_tokens);
-            for (int32_t i = 0; i < batch.n_tokens; i++) {
-                pos[i] = i+last_pos;
-            }
-            batch.pos = pos.data();
-        }
-        if (!batch.n_seq_id) {
-            n_seq_id.resize(batch.n_tokens);
-            for (int32_t i = 0; i < batch.n_tokens; i++) {
-                n_seq_id[i] = seq_id_0.size();
-            }
-            batch.n_seq_id = n_seq_id.data();
-        }
-        if (!batch.seq_id) {
-            seq_id.resize(batch.n_tokens + 1);
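-            // keep one extra nullptr entry at the end of the array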
-            seq_id[batch.n_tokens] = NULL;
-            for (int32_t i = 0; i < batch.n_tokens; i++) {
-                seq_id[i] = seq_id_0.data();
-            }
-            batch.seq_id = seq_id.data();
-        }
-        if (!batch.logits) {
-            logits.resize(batch.n_tokens);
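-            // by default, only the last token produces logits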
-            logits[logits.size() - 1] = true;
-            batch.logits = logits.data();
-        }
-    }
-};
-
-template<>
-bool llama_model_loader::get_key(const enum llm_kv kid, enum llama_pooling_type & result, const bool required) {
-    uint32_t tmp;
-    const bool found = get_key(kid, tmp, required);
-    if (found) {
-        result = (enum llama_pooling_type) tmp;
-    } else {
-        result = LLAMA_POOLING_TYPE_UNSPECIFIED;
-    }
-    return found;
-}
-
-
-//
-// load LLaMA models
-//
-
-static const char * llama_model_arch_name(llm_arch arch) {
-    auto it = LLM_ARCH_NAMES.find(arch);
-    if (it == LLM_ARCH_NAMES.end()) {
-        return "unknown";
-    }
-    return it->second;
-}
-
-static std::string llama_model_ftype_name(llama_ftype ftype) {
-    if (ftype & LLAMA_FTYPE_GUESSED) {
-        return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
-    }
-
-    switch (ftype) {
-        case LLAMA_FTYPE_ALL_F32:         return "all F32";
-        case LLAMA_FTYPE_MOSTLY_F16:      return "F16";
-        case LLAMA_FTYPE_MOSTLY_BF16:     return "BF16";
-        case LLAMA_FTYPE_MOSTLY_Q4_0:     return "Q4_0";
-        case LLAMA_FTYPE_MOSTLY_Q4_1:     return "Q4_1";
-        case LLAMA_FTYPE_MOSTLY_Q5_0:     return "Q5_0";
-        case LLAMA_FTYPE_MOSTLY_Q5_1:     return "Q5_1";
-        case LLAMA_FTYPE_MOSTLY_Q8_0:     return "Q8_0";
-        case LLAMA_FTYPE_MOSTLY_Q2_K:     return "Q2_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q2_K_S:   return "Q2_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q3_K_S:   return "Q3_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q3_K_M:   return "Q3_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q3_K_L:   return "Q3_K - Large";
-        case LLAMA_FTYPE_MOSTLY_Q4_K_S:   return "Q4_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q4_K_M:   return "Q4_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q5_K_S:   return "Q5_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q5_K_M:   return "Q5_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q6_K:     return "Q6_K";
-        case LLAMA_FTYPE_MOSTLY_TQ1_0:    return "TQ1_0 - 1.69 bpw ternary";
-        case LLAMA_FTYPE_MOSTLY_TQ2_0:    return "TQ2_0 - 2.06 bpw ternary";
-        case LLAMA_FTYPE_MOSTLY_IQ2_XXS:  return "IQ2_XXS - 2.0625 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ2_XS:   return "IQ2_XS - 2.3125 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ2_S:    return "IQ2_S - 2.5 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ2_M:    return "IQ2_M - 2.7 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_XS:   return "IQ3_XS - 3.3 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_XXS:  return "IQ3_XXS - 3.0625 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ1_S:    return "IQ1_S - 1.5625 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ1_M:    return "IQ1_M - 1.75 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ4_NL:   return "IQ4_NL - 4.5 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ4_XS:   return "IQ4_XS - 4.25 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_S:    return "IQ3_S - 3.4375 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_M:    return "IQ3_S mix - 3.66 bpw";
-
-        default: return "unknown, may not work";
-    }
-}
-
-static const char * llama_model_type_name(e_model type) {
-    switch (type) {
-        case MODEL_14M:           return "14M";
-        case MODEL_17M:           return "17M";
-        case MODEL_22M:           return "22M";
-        case MODEL_33M:           return "33M";
-        case MODEL_60M:           return "60M";
-        case MODEL_70M:           return "70M";
-        case MODEL_80M:           return "80M";
-        case MODEL_109M:          return "109M";
-        case MODEL_137M:          return "137M";
-        case MODEL_160M:          return "160M";
-        case MODEL_220M:          return "220M";
-        case MODEL_250M:          return "250M";
-        case MODEL_270M:          return "270M";
-        case MODEL_335M:          return "335M";
-        case MODEL_410M:          return "410M";
-        case MODEL_450M:          return "450M";
-        case MODEL_770M:          return "770M";
-        case MODEL_780M:          return "780M";
-        case MODEL_0_5B:          return "0.5B";
-        case MODEL_1B:            return "1B";
-        case MODEL_1_3B:          return "1.3B";
-        case MODEL_1_4B:          return "1.4B";
-        case MODEL_1_5B:          return "1.5B";
-        case MODEL_1_6B:          return "1.6B";
-        case MODEL_2B:            return "2B";
-        case MODEL_2_8B:          return "2.8B";
-        case MODEL_3B:            return "3B";
-        case MODEL_4B:            return "4B";
-        case MODEL_6B:            return "6B";
-        case MODEL_6_9B:          return "6.9B";
-        case MODEL_7B:            return "7B";
-        case MODEL_8B:            return "8B";
-        case MODEL_9B:            return "9B";
-        case MODEL_11B:           return "11B";
-        case MODEL_12B:           return "12B";
-        case MODEL_13B:           return "13B";
-        case MODEL_14B:           return "14B";
-        case MODEL_15B:           return "15B";
-        case MODEL_16B:           return "16B";
-        case MODEL_20B:           return "20B";
-        case MODEL_30B:           return "30B";
-        case MODEL_32B:           return "32B";
-        case MODEL_34B:           return "34B";
-        case MODEL_35B:           return "35B";
-        case MODEL_40B:           return "40B";
-        case MODEL_65B:           return "65B";
-        case MODEL_70B:           return "70B";
-        case MODEL_236B:          return "236B";
-        case MODEL_314B:          return "314B";
-        case MODEL_SMALL:         return "0.1B";
-        case MODEL_MEDIUM:        return "0.4B";
-        case MODEL_LARGE:         return "0.8B";
-        case MODEL_XL:            return "1.5B";
-        case MODEL_A1_7B:         return "A1.7B";
-        case MODEL_A2_7B:         return "A2.7B";
-        case MODEL_8x7B:          return "8x7B";
-        case MODEL_8x22B:         return "8x22B";
-        case MODEL_16x12B:        return "16x12B";
-        case MODEL_10B_128x3_66B: return "10B+128x3.66B";
-        case MODEL_57B_A14B:      return "57B.A14B";
-        case MODEL_27B:           return "27B";
-        default:                  return "?B";
-    }
-}
-
-static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
-    switch (type) {
-        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
-        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
-        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
-        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
-        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
-        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
-        default:                    return "unknown";
-    }
-}
-
-static void llm_load_stats(llama_model_loader & ml, llama_model & model) {
-    model.n_elements = ml.n_elements;
-    model.n_bytes = ml.n_bytes;
-}
-
-static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
-    model.arch = ml.get_arch();
-    if (model.arch == LLM_ARCH_UNKNOWN) {
-        throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
-    }
-}
-
-static void llm_load_hparams(
-        llama_model_loader & ml,
-        llama_model & model) {
-    auto & hparams = model.hparams;
-    const gguf_context * ctx = ml.meta.get();
-
-    // get metadata as string
-    for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
-        enum gguf_type type = gguf_get_kv_type(ctx, i);
-        if (type == GGUF_TYPE_ARRAY) {
-            continue;
-        }
-        const char * name = gguf_get_key(ctx, i);
-        const std::string value = gguf_kv_to_str(ctx, i);
-        model.gguf_kv.emplace(name, value);
-    }
-
-    // get general kv
-    ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
-
-    // get hparams kv
-    ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false);
-
-    // everything past this point is not vocab-related
-    if (hparams.vocab_only) {
-        return;
-    }
-
-    ml.get_key(LLM_KV_CONTEXT_LENGTH,    hparams.n_ctx_train);
-    ml.get_key(LLM_KV_EMBEDDING_LENGTH,  hparams.n_embd);
-    ml.get_key(LLM_KV_BLOCK_COUNT,       hparams.n_layer);
-    ml.get_key(LLM_KV_EXPERT_COUNT,      hparams.n_expert,      false);
-    ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
-
-    if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) {
-        ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
-
-        ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
-        ml.get_key(LLM_KV_POSNET_BLOCK_COUNT,      hparams.posnet.n_layer);
-
-        ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
-        ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT,      hparams.convnext.n_layer);
-    }
-
-    GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
-    GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
-    if (hparams.n_expert > 0) {
-        GGML_ASSERT(hparams.n_expert_used > 0);
-    } else {
-        GGML_ASSERT(hparams.n_expert_used == 0);
-    }
-
-    // zero-out the array hparams
-    std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
-    std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
-    std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
-
-    ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,   hparams.n_layer, false);
-    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
-
-    // n_head_kv is optional, default to n_head
-    hparams.n_head_kv_arr = hparams.n_head_arr;
-
-    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
-
-    bool rope_finetuned = false;
-    ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
-    hparams.rope_finetuned = rope_finetuned;
-
-    hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
-    ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
-
-    // rope_freq_base (optional)
-    hparams.rope_freq_base_train = 10000.0f;
-    ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
-
-    std::string rope_scaling("linear");
-    ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
-    hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
-    GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
-
-    // rope_freq_scale (inverse of the kv) is optional
-    float ropescale = 0.0f;
-    if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
-        // try the old key name
-        ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
-    }
-    hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
-
-    ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
-
-    // non-transformer models do not have attention heads
-    if (hparams.n_head() > 0) {
-        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
-        // gpt-j n_rot = rotary_dim
-
-        hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
-        ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
-
-        hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
-        ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
-
-        // sanity check for n_rot (optional)
-        hparams.n_rot = hparams.n_embd_head_k;
-
-        ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
-
-        if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) {
-            if (hparams.n_rot != hparams.n_embd_head_k) {
-                throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
-            }
-        }
-    } else {
-        hparams.n_rot = 0;
-        hparams.n_embd_head_k = 0;
-        hparams.n_embd_head_v = 0;
-    }
-
-    // arch-specific KVs
-    switch (model.arch) {
-        case LLM_ARCH_LLAMA:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                if (hparams.n_expert == 8) {
-                    switch (hparams.n_layer) {
-                        case 32: model.type = e_model::MODEL_8x7B; break;
-                        case 56: model.type = e_model::MODEL_8x22B; break;
-                        default: model.type = e_model::MODEL_UNKNOWN;
-                    }
-                } else {
-                    switch (hparams.n_layer) {
-                        case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B
-                        case 22: model.type = e_model::MODEL_1B; break;
-                        case 26: model.type = e_model::MODEL_3B; break;
-                        case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B
-                        // granite uses a vocab with len 49152
-                        case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break;
-                        case 36: model.type = e_model::MODEL_8B; break; // granite
-                        case 40: model.type = e_model::MODEL_13B; break;
-                        case 48: model.type = e_model::MODEL_34B; break;
-                        case 60: model.type = e_model::MODEL_30B; break;
-                        case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? e_model::MODEL_65B : e_model::MODEL_70B; break;
-                        default: model.type = e_model::MODEL_UNKNOWN;
-                    }
-                }
-            } break;
-        case LLM_ARCH_DECI:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 80: model.type = e_model::MODEL_70B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MINICPM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
-                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
-
-                switch (hparams.n_layer) {
-                    case 52: model.type = e_model::MODEL_1B; break;
-                    case 40: model.type = e_model::MODEL_2B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MINICPM3:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
-                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
-
-                switch (hparams.n_layer) {
-                    case 62: model.type = e_model::MODEL_4B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GROK:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 64: model.type = e_model::MODEL_314B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_FALCON:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 60: model.type = e_model::MODEL_40B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_BAICHUAN:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-
-                if (model.type == e_model::MODEL_13B) {
-                    // TODO: become GGUF KV parameter
-                    hparams.f_max_alibi_bias = 8.0f;
-                }
-            } break;
-        case LLM_ARCH_STARCODER:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 36: model.type = e_model::MODEL_3B; break;
-                    case 42: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_15B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_REFACT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_1B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-
-                // TODO: become GGUF KV parameter
-                hparams.f_max_alibi_bias = 8.0f;
-            } break;
-        case LLM_ARCH_BERT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
-                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
-
-                switch (hparams.n_layer) {
-                    case 3:
-                        model.type = e_model::MODEL_17M; break; // bge-micro
-                    case 6:
-                        model.type = e_model::MODEL_22M; break; // MiniLM-L6
-                    case 12:
-                        switch (hparams.n_embd) {
-                            case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
-                            case 768: model.type = e_model::MODEL_109M; break; // bge-base
-                        } break;
-                    case 24:
-                        model.type = e_model::MODEL_335M; break; // bge-large
-                }
-            } break;
-        case LLM_ARCH_JINA_BERT_V2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
-                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
-                hparams.f_max_alibi_bias = 8.0f;
-
-                switch (hparams.n_layer) {
-                    case 4:  model.type = e_model::MODEL_33M;  break; // jina-embeddings-small
-                    case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
-                }
-            } break;
-        case LLM_ARCH_NOMIC_BERT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
-                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);
-
-                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
-                    model.type = e_model::MODEL_137M;
-                }
-            } break;
-        case LLM_ARCH_BLOOM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 30:
-                        switch (hparams.n_embd) {
-                            case 2560: model.type = e_model::MODEL_3B; break;
-                            case 4096: model.type = e_model::MODEL_7B; break;
-                        } break;
-                }
-
-                // TODO: become GGUF KV parameter
-                hparams.f_max_alibi_bias = 8.0f;
-            } break;
-        case LLM_ARCH_MPT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv, false);
-                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 48: model.type = e_model::MODEL_30B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_STABLELM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = e_model::MODEL_12B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_QWEN:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_QWEN2VL:
-            {
-                std::array<int, 4> section_dims;
-                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, section_dims, 4, true);
-                std::copy(section_dims.begin(), section_dims.begin() + 4, std::begin(hparams.rope_sections));
-            }
-            // fall through
-        case LLM_ARCH_QWEN2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
-                    case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 36: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
-                    case 48: model.type = e_model::MODEL_14B; break;
-                    case 64: model.type = e_model::MODEL_32B; break;
-                    case 80: model.type = e_model::MODEL_70B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_QWEN2MOE:
-            {
-                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
-                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
-
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_A2_7B; break;
-                    case 28: model.type = e_model::MODEL_57B_A14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_PHI2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_PHI3:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = e_model::MODEL_14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-
-                // for backward compatibility; see: https://github.com/ggerganov/llama.cpp/pull/8931
-                if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
-                    // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
-                    hparams.n_swa = 2047;
-                } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
-                    // default value for Phi-3-mini-128k-instruct
-                    hparams.n_swa = 262144;
-                } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
-                    // default value for Phi-3-medium-128k-instruct
-                    hparams.n_swa = 131072;
-                }
-                bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
-                if (!found_swa && hparams.n_swa == 0) {
-                    throw std::runtime_error("invalid value for sliding_window");
-                }
-            } break;
-        case LLM_ARCH_PLAMO:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_GPT2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 12: model.type = e_model::MODEL_SMALL; break;
-                    case 24: model.type = e_model::MODEL_MEDIUM; break;
-                    case 36: model.type = e_model::MODEL_LARGE; break;
-                    case 48: model.type = e_model::MODEL_XL; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_CODESHELL:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 42: model.type = e_model::MODEL_7B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_ORION:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 40: model.type = e_model::MODEL_14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_INTERNLM2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 48: model.type = e_model::MODEL_20B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GEMMA:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 18: model.type = e_model::MODEL_2B; break;
-                    case 28: model.type = e_model::MODEL_7B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_GEMMA2:
-            {
-                hparams.n_swa = 4096; // default value of gemma 2
-                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
-                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
-                hparams.attn_soft_cap = true;
-
-                switch (hparams.n_layer) {
-                    case 26: model.type = e_model::MODEL_2B; break;
-                    case 42: model.type = e_model::MODEL_9B; break;
-                    case 46: model.type = e_model::MODEL_27B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_STARCODER2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 30: model.type = e_model::MODEL_3B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_15B; break;
-                    case 52: model.type = e_model::MODEL_20B; break; // granite
-                    case 88: model.type = e_model::MODEL_34B; break; // granite
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MAMBA:
-            {
-                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
-                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
-                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
-                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
-                ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);
-
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
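-                // Mamba checkpoints of different sizes can share a layer count, so the model size is
-                // resolved from the (n_layer, n_embd) pair rather than from n_layer alone.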
-                switch (hparams.n_layer) {
-                    case 24:
-                        switch (hparams.n_embd) {
-                            case 768: model.type = e_model::MODEL_SMALL; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 48:
-                        switch (hparams.n_embd) {
-                            case 1024: model.type = e_model::MODEL_MEDIUM; break;
-                            case 1536: model.type = e_model::MODEL_LARGE; break;
-                            case 2048: model.type = e_model::MODEL_XL; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 64:
-                        switch (hparams.n_embd) {
-                            case 2560: model.type = e_model::MODEL_3B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_XVERSE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    case 80: model.type = e_model::MODEL_65B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_COMMAND_R:
-            {
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 40: model.type = e_model::MODEL_35B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_DBRX:
-        {
-            ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
-            ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv);
-
-            switch (hparams.n_layer) {
-                case 40: model.type = e_model::MODEL_16x12B; break;
-                default: model.type = e_model::MODEL_UNKNOWN;
-            }
-        } break;
-        case LLM_ARCH_OLMO:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,     hparams.f_clamp_kqv, false);
-
-                switch (hparams.n_layer) {
-                    case 22: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 80: model.type = e_model::MODEL_70B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_OLMO2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 16: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_OLMOE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 16: model.type = e_model::MODEL_A1_7B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_OPENELM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                case 16: model.type = e_model::MODEL_270M; break;
-                case 20: model.type = e_model::MODEL_450M; break;
-                case 28: model.type = e_model::MODEL_1B; break;
-                case 36: model.type = e_model::MODEL_3B; break;
-                default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GPTNEOX:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
-                switch (hparams.n_layer) {
-                    case 6:
-                        switch (hparams.n_ff()) {
-                            case 512: model.type = e_model::MODEL_14M; break;
-                            case 2048: model.type = e_model::MODEL_70M; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 12:
-                        switch (hparams.n_ff()) {
-                            case 3072: model.type = e_model::MODEL_160M; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 16:
-                        switch (hparams.n_ff()) {
-                            case 8192: model.type = e_model::MODEL_1B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 24:
-                        switch (hparams.n_ff()) {
-                            case 4096: model.type = e_model::MODEL_410M; break;
-                            case 8192: model.type = e_model::MODEL_1_4B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 32:
-                        switch (hparams.n_ff()) {
-                            case 10240: model.type = e_model::MODEL_2_8B; break;
-                            case 16384: model.type = e_model::MODEL_6_9B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 36:
-                        switch (hparams.n_ff()) {
-                            case 20480: model.type = e_model::MODEL_12B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 44:
-                        switch (hparams.n_ff()) {
-                            case 24576: model.type = e_model::MODEL_20B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_ARCTIC:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                if (hparams.n_expert == 128) {
-                    switch (hparams.n_layer) {
-                        case 35: model.type = e_model::MODEL_10B_128x3_66B; break;
-                        default: model.type = e_model::MODEL_UNKNOWN;
-                    }
-                } else {
-                    model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_DEEPSEEK:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
-                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
-                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
-                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
-
-                switch (hparams.n_layer) {
-                    case 28: model.type = e_model::MODEL_20B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_DEEPSEEK2:
-            {
-                bool is_lite = (hparams.n_layer == 27);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
-                if (!is_lite) {
-                    ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
-                }
-                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
-                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
-                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
-                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
-                ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
-
-                switch (hparams.n_layer) {
-                    case 27: model.type = e_model::MODEL_16B; break;
-                    case 60: model.type = e_model::MODEL_236B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_CHATGLM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 28: model.type = e_model::MODEL_6B; break;
-                    case 40: model.type = e_model::MODEL_9B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_BITNET:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 26: model.type = e_model::MODEL_3B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_T5:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
-
-                uint32_t dec_start_token_id;
-                if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
-                    hparams.dec_start_token_id = dec_start_token_id;
-                }
-
-                switch (hparams.n_layer) {
-                    case 6:  model.type = e_model::MODEL_60M;  break; // t5-small
-                    case 8:  model.type = e_model::MODEL_80M;  break; // flan-t5-small
-                    case 12:
-                        switch (hparams.n_ff()) {
-                            case 3072: model.type = e_model::MODEL_220M; break; // t5-base
-                            case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 24:
-                        switch (hparams.n_ff()) {
-                            case 4096:  model.type = e_model::MODEL_770M; break; // t5-large
-                            case 2816:  model.type = e_model::MODEL_780M; break; // flan-t5-large
-                            case 16384: model.type = e_model::MODEL_3B;   break; // t5-3b
-                            case 5120:  model.type = e_model::MODEL_3B;   break; // flan-t5-xl
-                            case 65536: model.type = e_model::MODEL_11B;  break; // t5-11b
-                            case 10240: model.type = e_model::MODEL_11B;  break; // flan-t5-xxl
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_T5ENCODER:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
-                model.type = e_model::MODEL_UNKNOWN;
-            } break;
-        case LLM_ARCH_JAIS:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1_3B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    /* TODO: add variants */
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_NEMOTRON:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_4B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_EXAONE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_8B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_RWKV6:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
-                ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
-                ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
-                ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1_6B; break;
-                    case 32:
-                        switch (hparams.n_embd) {
-                            case 2560: model.type = e_model::MODEL_3B; break;
-                            case 4096: model.type = e_model::MODEL_7B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 61: model.type = e_model::MODEL_14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GRANITE:
-        case LLM_ARCH_GRANITE_MOE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
-                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
-                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
-                ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = e_model::MODEL_3B; break;
-                    // Add additional layer/vocab/etc checks here for other model sizes
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_CHAMELEON:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                hparams.f_norm_eps = 1e-5;  // eps for qk-norm, torch default
-                ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 48: model.type = e_model::MODEL_34B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_WAVTOKENIZER_DEC:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS,    hparams.f_norm_group_eps);
-                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-            } break;
-        default: (void)0;
-    }
-
-    model.ftype = ml.ftype;
-
-    if (hparams.f_max_alibi_bias > 0.0f) {
-        hparams.use_alibi = true;
-    }
-
-    hparams.rope_type = llama_rope_type(&model);
-}
-
-static void llm_load_vocab(
-        llama_model_loader & ml,
-        llama_model & model) {
-    auto & vocab = model.vocab;
-
-    struct gguf_context * ctx = ml.meta.get();
-
-    const auto kv = LLM_KV(model.arch);
-
-    // determine vocab type
-    {
-        std::string tokenizer_model;
-        std::string tokenizer_pre;
-
-        ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
-        ml.get_key(LLM_KV_TOKENIZER_PRE,   tokenizer_pre, false);
-
-        if (tokenizer_model == "no_vocab" || tokenizer_model == "none") {
-            vocab.type = LLAMA_VOCAB_TYPE_NONE;
-
-            // default special tokens
-            vocab.special_bos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_unk_id  = LLAMA_TOKEN_NULL;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = LLAMA_TOKEN_NULL;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-            vocab.linefeed_id     = LLAMA_TOKEN_NULL;
-
-            // read vocab size from metadata
-            if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) {
-                vocab.n_vocab = 0;
-                LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab);
-            }
-            return;
-        }
-
-        if (tokenizer_model == "llama") {
-            vocab.type = LLAMA_VOCAB_TYPE_SPM;
-
-            // default special tokens
-            vocab.special_bos_id  = 1;
-            vocab.special_eos_id  = 2;
-            vocab.special_unk_id  = 0;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = LLAMA_TOKEN_NULL;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-        } else if (tokenizer_model == "bert") {
-            vocab.type = LLAMA_VOCAB_TYPE_WPM;
-
-            // default special tokens
-            vocab.special_bos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_unk_id  = 100;
-            vocab.special_sep_id  = 102;
-            vocab.special_pad_id  = 0;
-            vocab.special_cls_id  = 101;
-            vocab.special_mask_id = 103;
-        } else if (tokenizer_model == "gpt2") {
-            vocab.type = LLAMA_VOCAB_TYPE_BPE;
-
-            // read bpe merges and populate bpe ranks
-            const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
-            if (merges_keyidx == -1) {
-                throw std::runtime_error("cannot find tokenizer merges in model file\n");
-            }
-
-            const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
-            for (int i = 0; i < n_merges; i++) {
-                const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
-                GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
-
-                std::string first;
-                std::string second;
-
-                const size_t pos = word.find(' ', 1);
-
-                if (pos != std::string::npos) {
-                    first  = word.substr(0, pos);
-                    second = word.substr(pos + 1);
-                }
-
-                vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
-            }
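-            // Each merges entry is a space-separated pair such as "t h"; its position in the array
-            // becomes the merge rank, so earlier entries are applied with higher priority during BPE.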
-
-            // default special tokens
-            vocab.special_bos_id  = 11;
-            vocab.special_eos_id  = 11;
-            vocab.special_unk_id  = LLAMA_TOKEN_NULL;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = LLAMA_TOKEN_NULL;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-        } else if (tokenizer_model == "t5") {
-            vocab.type = LLAMA_VOCAB_TYPE_UGM;
-
-            // default special tokens
-            vocab.special_bos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id  = 1;
-            vocab.special_unk_id  = 2;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = 0;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-
-            const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
-            if (precompiled_charsmap_keyidx != -1) {
-                size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
-                const char * precompiled_charsmap = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx);
-                vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap);
-#ifdef IS_BIG_ENDIAN
-                // correct endiannes of data in precompiled_charsmap binary blob
-                uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0];
-                *xcda_blob_size = __builtin_bswap32(*xcda_blob_size);
-                assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap);
-                size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t);
-                uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)];
-                for (size_t i = 0; i < xcda_array_size; ++i) {
-                    xcda_array[i] = __builtin_bswap32(xcda_array[i]);
-                }
-#endif
-            }
-        } else if (tokenizer_model == "rwkv") {
-            vocab.type = LLAMA_VOCAB_TYPE_RWKV;
-
-            // default special tokens
-            vocab.special_bos_id = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id = LLAMA_TOKEN_NULL;
-            vocab.special_unk_id = LLAMA_TOKEN_NULL;
-            vocab.special_sep_id = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id = LLAMA_TOKEN_NULL;
-        } else {
-            throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
-        }
-
-        // for now, only BPE models have pre-tokenizers
-        if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
-            vocab.tokenizer_add_space_prefix = false;
-            vocab.tokenizer_clean_spaces = true;
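-            // type_pre selects the regex-based pre-tokenization rules applied before the BPE merges;
-            // an unrecognized pre-tokenizer name aborts loading instead of silently degrading output.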
-            if (tokenizer_pre.empty()) {
-                LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
-                LLAMA_LOG_WARN("%s:                                             \n", __func__);
-                LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
-                LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED!        \n", __func__);
-                LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL             \n", __func__);
-                LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
-                LLAMA_LOG_WARN("%s:                                             \n", __func__);
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            } else if (tokenizer_pre == "default") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            } else if (
-                    tokenizer_pre == "llama3"   ||
-                    tokenizer_pre == "llama-v3" ||
-                    tokenizer_pre == "llama-bpe"||
-                    tokenizer_pre == "falcon3") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
-                vocab.tokenizer_ignore_merges = true;
-                vocab.tokenizer_add_bos = true;
-            } else if (
-                    tokenizer_pre == "deepseek-llm") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                    tokenizer_pre == "deepseek-coder") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                    tokenizer_pre == "falcon") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
-            } else if (
-                    tokenizer_pre == "mpt") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT;
-            } else if (
-                    tokenizer_pre == "starcoder") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
-            } else if (
-                    tokenizer_pre == "gpt-2"   ||
-                    tokenizer_pre == "phi-2"   ||
-                    tokenizer_pre == "jina-es" ||
-                    tokenizer_pre == "jina-de" ||
-                    tokenizer_pre == "gigachat"   ||
-                    tokenizer_pre == "jina-v1-en" ||
-                    tokenizer_pre == "jina-v2-es" ||
-                    tokenizer_pre == "jina-v2-de" ||
-                    tokenizer_pre == "jina-v2-code" ||
-                    tokenizer_pre == "roberta-bpe") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
-            } else if (
-                    tokenizer_pre == "refact") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
-            } else if (
-                tokenizer_pre == "command-r") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "qwen2") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "stablelm2") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
-            } else if (
-                tokenizer_pre == "olmo") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
-            } else if (
-                tokenizer_pre == "dbrx") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
-            } else if (
-                tokenizer_pre == "smaug-bpe") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
-            } else if (
-                tokenizer_pre == "poro-chat") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "chatglm-bpe") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
-                vocab.special_bos_id = LLAMA_TOKEN_NULL;
-            } else if (
-                tokenizer_pre == "viking") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "jais") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
-            } else if (
-                tokenizer_pre == "tekken") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
-                vocab.tokenizer_clean_spaces = false;
-                vocab.tokenizer_ignore_merges = true;
-                vocab.tokenizer_add_bos = true;
-            } else if (
-                tokenizer_pre == "smollm") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "codeshell") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
-            } else if (
-                tokenizer_pre == "bloom") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
-            } else if (
-                tokenizer_pre == "gpt3-finnish") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
-            } else if (
-                tokenizer_pre == "exaone") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE;
-            } else if (
-                tokenizer_pre == "chameleon") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON;
-                vocab.tokenizer_add_bos = true;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "minerva-7b") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA;
-            } else if (
-                tokenizer_pre == "megrez") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
-            } else {
-                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
-            }
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_space_prefix = true;
-            vocab.tokenizer_clean_spaces = false;
-            vocab.tokenizer_add_bos = true;
-            vocab.tokenizer_add_eos = false;
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_space_prefix = false;
-            vocab.tokenizer_clean_spaces = true;
-            vocab.tokenizer_add_bos = true;
-            vocab.tokenizer_add_eos = false;
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_bos = false;
-            vocab.tokenizer_add_eos = true;
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_space_prefix = false;
-            vocab.tokenizer_clean_spaces = false;
-            vocab.tokenizer_add_bos = false;
-            vocab.tokenizer_add_eos = false;
-        } else {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-        }
-
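-        // GGUF metadata, when present, overrides the per-vocab-type defaults assigned above.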
-        ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX,      vocab.tokenizer_add_space_prefix,         false);
-        ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false);
-    }
-
-    const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
-    if (token_idx == -1) {
-        throw std::runtime_error("cannot find tokenizer vocab in model file\n");
-    }
-
-    const float * scores = nullptr;
-    const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
-    if (score_idx != -1) {
-        scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
-    }
-
-    const int * toktypes = nullptr;
-    const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
-    if (toktype_idx != -1) {
-        toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
-    }
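-    // scores and toktypes are optional arrays parallel to the token list; tokens without entries
-    // fall back to a score of 0.0f and LLAMA_TOKEN_ATTR_NORMAL below.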
-
-    const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
-
-    vocab.n_vocab = n_vocab;
-    vocab.id_to_token.resize(n_vocab);
-
-    for (uint32_t i = 0; i < n_vocab; i++) {
-        std::string word = gguf_get_arr_str(ctx, token_idx, i);
-
-        //GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
-        if (word.empty()) {
-            LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i);
-            word = "[EMPTY_" + std::to_string(i) + "]";
-        }
-
-        vocab.token_to_id[word] = i;
-        vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size());
-
-        auto & token_data = vocab.id_to_token[i];
-        token_data.text  = std::move(word);
-        token_data.score = scores ? scores[i] : 0.0f;
-        token_data.attr  = LLAMA_TOKEN_ATTR_NORMAL;
-
-        if (toktypes) {  //TODO: remove, required until per token attributes are available from GGUF file
-            switch(toktypes[i]) {
-                case LLAMA_TOKEN_TYPE_UNKNOWN:      token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN;      break;
-                case LLAMA_TOKEN_TYPE_UNUSED:       token_data.attr = LLAMA_TOKEN_ATTR_UNUSED;       break;
-                case LLAMA_TOKEN_TYPE_NORMAL:       token_data.attr = LLAMA_TOKEN_ATTR_NORMAL;       break;
-                case LLAMA_TOKEN_TYPE_CONTROL:      token_data.attr = LLAMA_TOKEN_ATTR_CONTROL;      break;
-                case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break;
-                case LLAMA_TOKEN_TYPE_BYTE:         token_data.attr = LLAMA_TOKEN_ATTR_BYTE;         break;
-                case LLAMA_TOKEN_TYPE_UNDEFINED:    token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED;    break;
-                default:                            token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED;    break;
-            }
-        }
-    }
-    GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
-
-    vocab.init_tokenizer();
-
-    // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
-    if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
-        try {
-            vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n');
-        } catch (const std::exception & e) {
-            LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
-            vocab.linefeed_id = vocab.special_pad_id;
-        }
-    } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
-        vocab.linefeed_id = vocab.special_pad_id;
-    } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
-        const std::vector<int> ids = llama_tokenize_internal(vocab, "\n", false);
-        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
-        vocab.linefeed_id = ids[0];
-    } else {
-        const std::vector<int> ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
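-        // "\xC4\x8A" is the UTF-8 encoding of U+010A ('Ċ'); byte-level BPE vocabularies remap the raw
-        // newline byte 0x0A to that codepoint, so tokenizing it recovers the linefeed token id.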
-
-        //GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
-        if (ids.empty()) {
-            LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__);
-            vocab.linefeed_id = vocab.special_pad_id;
-        } else {
-            vocab.linefeed_id = ids[0];
-        }
-    }
-
-    // special tokens
-    {
-        const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
-            { LLM_KV_TOKENIZER_BOS_ID,     vocab.special_bos_id     },
-            { LLM_KV_TOKENIZER_EOS_ID,     vocab.special_eos_id     },
-            { LLM_KV_TOKENIZER_EOT_ID,     vocab.special_eot_id     },
-            { LLM_KV_TOKENIZER_EOM_ID,     vocab.special_eom_id     },
-            { LLM_KV_TOKENIZER_UNK_ID,     vocab.special_unk_id     },
-            { LLM_KV_TOKENIZER_SEP_ID,     vocab.special_sep_id     },
-            { LLM_KV_TOKENIZER_PAD_ID,     vocab.special_pad_id     },
-            { LLM_KV_TOKENIZER_CLS_ID,     vocab.special_cls_id     },
-            { LLM_KV_TOKENIZER_MASK_ID,    vocab.special_mask_id    },
-            { LLM_KV_TOKENIZER_FIM_PRE_ID, vocab.special_fim_pre_id },
-            { LLM_KV_TOKENIZER_FIM_SUF_ID, vocab.special_fim_suf_id },
-            { LLM_KV_TOKENIZER_FIM_MID_ID, vocab.special_fim_mid_id },
-            { LLM_KV_TOKENIZER_FIM_PAD_ID, vocab.special_fim_pad_id },
-            { LLM_KV_TOKENIZER_FIM_REP_ID, vocab.special_fim_rep_id },
-            { LLM_KV_TOKENIZER_FIM_SEP_ID, vocab.special_fim_sep_id },
-
-            // deprecated
-            { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_fim_pre_id },
-            { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_fim_suf_id },
-            { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_fim_mid_id },
-        };
-
-        for (const auto & it : special_token_types) {
-            const std::string & key = kv(std::get<0>(it));
-            int32_t & id = std::get<1>(it);
-
-            uint32_t new_id;
-            if (!ml.get_key(std::get<0>(it), new_id, false)) {
-                continue;
-            }
-            if (new_id >= vocab.id_to_token.size()) {
-                LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
-                    __func__, key.c_str(), new_id, id);
-            } else {
-                id = new_id;
-            }
-        }
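-        // At this point the special token ids reflect the GGUF metadata where it was provided,
-        // falling back to the per-tokenizer defaults assigned earlier.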
-
-        // Handle add_bos_token and add_eos_token
-        {
-            bool temp = true;
-
-            if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
-                vocab.tokenizer_add_bos = temp;
-            }
-            if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
-                vocab.tokenizer_add_eos = temp;
-            }
-        }
-
-        // auto-detect special tokens by text
-        // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_...
-        //       for now, we apply this workaround to find the tokens based on their text
-
-        for (const auto & t : vocab.token_to_id) {
-            // find EOT token: "<|eot_id|>", "<|im_end|>", "<end_of_turn>", etc.
-            if (vocab.special_eot_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|eot_id|>"
-                        || t.first == "<|im_end|>"
-                        || t.first == "<|end|>"
-                        || t.first == "<end_of_turn>"
-                        || t.first == "<|endoftext|>"
-                        || t.first == "<EOT>"
-                        || t.first == "<|end▁of▁sentence|>" // DeepSeek
-                   ) {
-                    vocab.special_eot_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find EOM token: "<|eom_id|>"
-            if (vocab.special_eom_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|eom_id|>"
-                        ) {
-                    vocab.special_eom_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_PRE token: "<|fim_prefix|>", "<fim-prefix>", "<PRE>", etc.
-            if (vocab.special_fim_pre_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_prefix|>"  // Qwen
-                        || t.first == "<fim-prefix>"
-                        || t.first == "<|fim▁begin|>" // DeepSeek
-                        || t.first == "<PRE>"
-                        ) {
-                    vocab.special_fim_pre_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_SUF token: "<|fim_suffix|>", "<fim-suffix>", "<SUF>", etc.
-            if (vocab.special_fim_suf_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_suffix|>" // Qwen
-                        || t.first == "<fim-suffix>"
-                        || t.first == "<|fim▁hole|>" // DeepSeek
-                        || t.first == "<SUF>"
-                        ) {
-                    vocab.special_fim_suf_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_MID token: "<|fim_middle|>", "<fim-middle>", "<MID>", etc.
-            if (vocab.special_fim_mid_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_middle|>" // Qwen
-                        || t.first == "<fim-middle>"
-                        || t.first == "<|fim▁end|>"  // DeepSeek
-                        || t.first == "<MID>"
-                        ) {
-                    vocab.special_fim_mid_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_PAD token: "<|fim_pad|>", "<fim-pad>", "<PAD>", etc.
-            if (vocab.special_fim_pad_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_pad|>" // Qwen
-                        || t.first == "<fim-pad>"
-                        || t.first == "<PAD>"
-                        ) {
-                    vocab.special_fim_pad_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_REP token: "<|fim_repo|>", "<fim-repo>", "<REPO>", etc.
-            if (vocab.special_fim_rep_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_repo|>"  // Qwen
-                        || t.first == "<|repo_name|>"
-                        || t.first == "<fim-repo>"
-                        || t.first == "<REPO>"
-                        ) {
-                    vocab.special_fim_rep_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_SEP token: "<|file_sep|>"
-            if (vocab.special_fim_sep_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|file_sep|>" // Qwen
-                        ) {
-                    vocab.special_fim_sep_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-        }
-
-        // maintain a list of tokens that cause end-of-generation
-        // this is currently determined based on the token text, which is obviously not ideal
-        // ref: https://github.com/ggerganov/llama.cpp/issues/9606
-        vocab.special_eog_ids.clear();
-
-        if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_pad_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_pad_id);
-        }
-
-        if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_rep_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_rep_id);
-        }
-
-        if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_sep_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_sep_id);
-        }
-
-        for (const auto & t : vocab.token_to_id) {
-            if (false
-                    || t.first == "<|eot_id|>"
-                    || t.first == "<|im_end|>"
-                    || t.first == "<|end|>"
-                    || t.first == "<end_of_turn>"
-                    || t.first == "<|endoftext|>"
-                    || t.first == "<|eom_id|>"
-                    || t.first == "<EOT>"
-               ) {
-                vocab.special_eog_ids.insert(t.second);
-                if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                    LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                            __func__, t.second, t.first.c_str());
-                    vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                }
-            } else {
-                // token is control, but not marked as EOG -> print a debug log
-                if (vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && vocab.special_eog_ids.count(t.second) == 0) {
-                    LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
-                            __func__, t.second, t.first.c_str());
-                }
-            }
-        }
-
-        // sanity checks
-        if (vocab.special_eos_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eos_id);
-            LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-
-        if (vocab.special_eot_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eot_id);
-            LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-
-        if (vocab.special_eom_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eom_id);
-            LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-    }
-
-    // build special tokens cache
-    {
-        for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
-            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
-                vocab.cache_special_tokens.push_back(id);
-            }
-        }
-
-        std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
-            [&] (const llama_vocab::id a, const llama_vocab::id b) {
-                return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
-            }
-        );
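-        // The descending-length order lets longer special tokens take precedence over shorter ones
-        // that share a prefix when input text is scanned for special-token occurrences.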
-
-        LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
-    }
-
-    // build token to piece cache
-    {
-        size_t size_cache = 0;
-
-        std::vector<std::string> cache_token_to_piece(n_vocab);
-
-        for (uint32_t id = 0; id < n_vocab; ++id) {
-            cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
-
-            size_cache += cache_token_to_piece[id].size();
-        }
-
-        std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
-
-        LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
-    }
-
-    // Handle per token attributes
-    //NOTE: Each model customizes per token attributes.
-    //NOTE: Per token attributes are missing from the GGUF file.
-    //TODO: Extract attributes from GGUF file.
-    {
-        auto _contains_any = [] (const std::string &str, const std::vector<std::string> &substrs) -> bool {
-            for (auto substr : substrs) {
-                if (str.find(substr) < std::string::npos) {
-                    return true;
-                }
-            }
-            return false;
-        };
-
-        auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
-            uint32_t current = vocab.id_to_token.at(id).attr;
-            current = value ? (current | attr) : (current & ~attr);
-            vocab.id_to_token[id].attr = (llama_token_attr) current;
-        };
-
-        auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
-            _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
-        };
-
-        std::string model_name;
-        std::string tokenizer_pre;
-
-        ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
-        ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
-
-        // model name to lowercase
-        std::transform(model_name.begin(), model_name.end(), model_name.begin(),
-            [] (const std::string::value_type x) {
-                return std::tolower(x);
-            }
-        );
-
-        // set attributes by model/tokenizer name
-        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
-            _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
-        } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
-            for (auto id : vocab.cache_special_tokens) {
-                _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
-            }
-            for (auto token : {"</s>"}) {
-                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
-            }
-            for (auto token : {"<unk>", "<s>", "<|endoftext|>"}) {
-                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
-            }
-        }
-    }
-}
-
-static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
-    const auto & hparams = model.hparams;
-    const auto & vocab   = model.vocab;
-
-    const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
-
-    auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
-        bool is_var = false;
-
-        std::vector<uint32_t> v;
-        for (uint32_t i = 0; i < n; ++i) {
-            v.push_back(f(i));
-            if (v[i] != v[0]) {
-                is_var = true;
-            }
-        }
-
-        std::stringstream ss;
-
-        if (is_var) {
-            ss << "[";
-            for (uint32_t i = 0; i < n; ++i) {
-                ss << v[i];
-                if (i < n - 1) {
-                    ss << ", ";
-                }
-            }
-            ss << "]";
-        } else {
-            ss << v[0];
-        }
-
-        return ss.str();
-    };
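-    // print_f renders a per-layer hyperparameter as a single value when it is constant across layers,
-    // or as the full list (e.g. "[32, 32, 32, 8]") when it varies; the example values are illustrative.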
-
-    // hparams
-    LLAMA_LOG_INFO("%s: format           = %s\n",     __func__, llama_file_version_name(ml.fver));
-    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, LLM_ARCH_NAMES.at(model.arch));
-    LLAMA_LOG_INFO("%s: vocab type       = %s\n",     __func__, llama_model_vocab_type_name(vocab.type));
-    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",     __func__, hparams.n_vocab);
-    LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (int) vocab.bpe_ranks.size());
-    LLAMA_LOG_INFO("%s: vocab_only       = %d\n",     __func__, hparams.vocab_only);
-
-    if (!hparams.vocab_only) {
-        LLAMA_LOG_INFO("%s: n_ctx_train      = %u\n",     __func__, hparams.n_ctx_train);
-        LLAMA_LOG_INFO("%s: n_embd           = %u\n",     __func__, hparams.n_embd);
-        LLAMA_LOG_INFO("%s: n_layer          = %u\n",     __func__, hparams.n_layer);
-        LLAMA_LOG_INFO("%s: n_head           = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head(il);    }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_head_kv        = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_rot            = %u\n",     __func__, hparams.n_rot);
-        LLAMA_LOG_INFO("%s: n_swa            = %u\n",     __func__, hparams.n_swa);
-        LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n",     __func__, hparams.n_embd_head_k);
-        LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n",     __func__, hparams.n_embd_head_v);
-        LLAMA_LOG_INFO("%s: n_gqa            = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il);        }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_embd_k_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_embd_v_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: f_norm_eps       = %.1e\n",   __func__, hparams.f_norm_eps);
-        LLAMA_LOG_INFO("%s: f_norm_rms_eps   = %.1e\n",   __func__, hparams.f_norm_rms_eps);
-        LLAMA_LOG_INFO("%s: f_clamp_kqv      = %.1e\n",   __func__, hparams.f_clamp_kqv);
-        LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n",   __func__, hparams.f_max_alibi_bias);
-        LLAMA_LOG_INFO("%s: f_logit_scale    = %.1e\n",   __func__, hparams.f_logit_scale);
-        LLAMA_LOG_INFO("%s: n_ff             = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_expert         = %u\n",     __func__, hparams.n_expert);
-        LLAMA_LOG_INFO("%s: n_expert_used    = %u\n",     __func__, hparams.n_expert_used);
-        LLAMA_LOG_INFO("%s: causal attn      = %d\n",     __func__, hparams.causal_attn);
-        LLAMA_LOG_INFO("%s: pooling type     = %d\n",     __func__, hparams.pooling_type);
-        LLAMA_LOG_INFO("%s: rope type        = %d\n",     __func__, hparams.rope_type);
-        LLAMA_LOG_INFO("%s: rope scaling     = %s\n",     __func__, rope_scaling_type);
-        LLAMA_LOG_INFO("%s: freq_base_train  = %.1f\n",   __func__, hparams.rope_freq_base_train);
-        LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",     __func__, hparams.rope_freq_scale_train);
-        LLAMA_LOG_INFO("%s: n_ctx_orig_yarn  = %u\n",     __func__, hparams.n_ctx_orig_yarn);
-        LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
-        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
-        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
-        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
-        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
-        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
-    }
-
-    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, llama_model_type_name(model.type));
-    LLAMA_LOG_INFO("%s: model ftype      = %s\n",     __func__, llama_model_ftype_name(model.ftype).c_str());
-    if (ml.n_elements >= 1e12) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
-    } else if (ml.n_elements >= 1e9) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
-    } else if (ml.n_elements >= 1e6) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
-    } else {
-        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
-    }
-    if (ml.n_bytes < GiB) {
-        LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0,        ml.n_bytes*8.0/ml.n_elements);
-    } else {
-        LLAMA_LOG_INFO("%s: model size       = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
-    }
-
-    // general kv
-    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
-
-    // special tokens
-    if (vocab.special_bos_id  != -1)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
-    if (vocab.special_eos_id  != -1)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
-    if (vocab.special_eot_id  != -1)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
-    if (vocab.special_eom_id  != -1)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
-    if (vocab.special_unk_id  != -1)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
-    if (vocab.special_sep_id  != -1)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
-    if (vocab.special_pad_id  != -1)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
-    if (vocab.special_cls_id  != -1)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
-    if (vocab.special_mask_id != -1)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
-
-    if (vocab.linefeed_id != -1)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
-
-    if (vocab.special_fim_pre_id != -1) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
-    if (vocab.special_fim_suf_id != -1) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
-    if (vocab.special_fim_mid_id != -1) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
-    if (vocab.special_fim_pad_id != -1) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
-    if (vocab.special_fim_rep_id != -1) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
-    if (vocab.special_fim_sep_id != -1) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
-
-    for (const auto & id : vocab.special_eog_ids) {
-        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
-    }
-
-    LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
-
-    if (model.arch == LLM_ARCH_DEEPSEEK) {
-        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
-        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
-        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
-        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
-    }
-
-    if (model.arch == LLM_ARCH_DEEPSEEK2) {
-        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
-        LLAMA_LOG_INFO("%s: n_lora_q             = %d\n",     __func__, hparams.n_lora_q);
-        LLAMA_LOG_INFO("%s: n_lora_kv            = %d\n",     __func__, hparams.n_lora_kv);
-        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
-        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
-        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
-        LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
-    }
-
-    if (model.arch == LLM_ARCH_QWEN2MOE) {
-        LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
-        LLAMA_LOG_INFO("%s: n_ff_shexp       = %d\n",     __func__, hparams.n_ff_shexp);
-    }
-
-    if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
-        LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
-        LLAMA_LOG_INFO("%s: f_residual_scale  = %f\n", __func__, hparams.f_residual_scale);
-        LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
-    }
-}
-
-enum llm_tensor_layer {
-    LLM_TENSOR_LAYER_INPUT,
-    LLM_TENSOR_LAYER_REPEATING,
-    LLM_TENSOR_LAYER_OUTPUT,
-};
-
-struct llm_tensor_info {
-    llm_tensor_layer layer;
-    ggml_op op;
-};
-
-static const std::map<llm_tensor, llm_tensor_info> llm_tensor_info_mapping = {
-    {LLM_TENSOR_TOKEN_EMBD,                 {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_POS_EMBD,                   {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_TOKEN_EMBD_NORM,            {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_TOKEN_TYPES,                {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_OUTPUT,                     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CLS,                        {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CLS_OUT,                    {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_OUTPUT_NORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_ROPE_FREQS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ROPE_FACTORS_LONG,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ROPE_FACTORS_SHORT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_Q,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_K,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_V,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_OUT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_INP_SHEXP,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_INP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_IN,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_X,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_DT,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_OUT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_W1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_W2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_DECAY_W1,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_DECAY_W2,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_KEY,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_VALUE,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_RECEPTANCE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_GATE,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_OUTPUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_KEY,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_VALUE,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_ACT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
-    {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
-    {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
-    {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CHANNEL_MIX_LERP_R,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LERP_W,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_K,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_V,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_R,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_G,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_DECAY,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_FIRST,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
-    {LLM_TENSOR_ATTN_NORM,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_NORM_2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_OUT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_POST_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_POST_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_NORM_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_Q_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_K_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_LAYER_OUT_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_Q_A_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_KV_A_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_SUB_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_SUB_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_ENC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    // this tensor is loaded for T5, but never used
-    {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
-    {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_NORM2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_CONV1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_CONV2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_ATTN_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_ATTN_Q,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_V,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_OUT,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_DW,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_CONVNEXT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CONVNEXT_PW1,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_PW2,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_GAMMA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-};
-
 // checks if the weight tensor can be used with the specified buffer type and device
 static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
     GGML_ASSERT(w != nullptr);
@@ -7841,11 +422,12 @@ static bool llm_load_tensors(
                 tn_tensor = LLM_TENSOR_OUTPUT;
             }
 
-            auto it = llm_tensor_info_mapping.find(tn_tensor);
-            if (it == llm_tensor_info_mapping.end()) {
+            llm_tensor_info info;
+            try {
+                info = llm_tensor_info_for(tn_tensor);
+            } catch (const std::out_of_range & e) {
                 throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
             }
-            const auto & info = it->second;
 
             // tensors with "bias" suffix are always used with GGML_OP_ADD
             ggml_op op;
@@ -14644,9 +7226,9 @@ struct llm_build_context {
 
                 // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
                 switch (model.type) {
-                    case e_model::MODEL_2B:
-                    case e_model::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
-                    case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+                    case llm_type::MODEL_2B:
+                    case llm_type::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
+                    case llm_type::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
                     default: GGML_ABORT("fatal error");
                 };
                 cb(Qcur, "Qcur_scaled", il);
@@ -17896,572 +10478,6 @@ static struct ggml_cgraph * llama_build_graph(
     return result;
 }
 
-static void llama_set_k_shift(llama_context & lctx) {
-    const int64_t kv_size = lctx.kv_self.size;
-
-    assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));
-
-    int32_t * data = (int32_t *) lctx.inp_K_shift->data;
-
-    for (int i = 0; i < kv_size; ++i) {
-        data[i] = lctx.kv_self.cells[i].delta;
-    }
-}
-
-static void llama_set_s_copy(llama_context & lctx) {
-    const int64_t kv_size = lctx.kv_self.size;
-
-    assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
-
-    int32_t * data = (int32_t *) lctx.inp_s_copy->data;
-
-    for (int i = 0; i < kv_size; ++i) {
-        data[i] = lctx.kv_self.cells[i].src;
-    }
-}
-
-static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
-    // TODO move to hparams if a T5 variant appears that uses a different value
-    const int64_t max_distance = 128;
-
-    if (bidirectional) {
-        n_buckets >>= 1;
-    }
-
-    const int64_t max_exact = n_buckets >> 1;
-
-    int32_t relative_position = x - y;
-    int32_t relative_bucket = 0;
-    if (bidirectional) {
-        relative_bucket += (relative_position > 0) * n_buckets;
-        relative_position = abs(relative_position);
-    } else {
-        relative_position = -std::min(relative_position, 0);
-    }
-    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
-    relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
-    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);
-    return relative_bucket;
-}
-
-static void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) {
-    //
-    // set input data
-    //
-
-    const auto & hparams = lctx.model.hparams;
-    const auto & cparams = lctx.cparams;
-    const auto & kv_self = lctx.kv_self;
-
-    if (ubatch.token) {
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        ggml_backend_tensor_set(lctx.inp_tokens, ubatch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
-    }
-
-    if (ubatch.embd) {
-        const int64_t n_embd   = hparams.n_embd;
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        ggml_backend_tensor_set(lctx.inp_embd, ubatch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
-    }
-
-    if (ubatch.pos && lctx.inp_pos) {
-        const int64_t n_tokens = ubatch.n_tokens;
-        auto n_pos = lctx.n_pos_per_token;
-        ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*n_pos*ggml_element_size(lctx.inp_pos));
-    }
-
-    if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
-        //GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
-
-        if (!lctx.inp_out_ids) {
-            LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__);
-        } else {
-            const int64_t n_tokens = ubatch.n_tokens;
-
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer));
-            int32_t * data = (int32_t *) lctx.inp_out_ids->data;
-
-            if (lctx.n_outputs == n_tokens) {
-                for (int i = 0; i < n_tokens; ++i) {
-                    data[i] = i;
-                }
-            } else if (ubatch.output) {
-                int32_t n_outputs = 0;
-                for (int i = 0; i < n_tokens; ++i) {
-                    if (ubatch.output[i]) {
-                        data[n_outputs++] = i;
-                    }
-                }
-                // the graph needs to have been passed the correct number of outputs
-                GGML_ASSERT(lctx.n_outputs == n_outputs);
-            } else if (lctx.n_outputs == 1) {
-                // only keep last output
-                data[0] = n_tokens - 1;
-            } else {
-                GGML_ASSERT(lctx.n_outputs == 0);
-            }
-        }
-    }
-
-    GGML_ASSERT(
-        // (!a || b) is a logical implication (a -> b)
-        // !hparams.causal_attn -> !cparams.causal_attn
-        (hparams.causal_attn || !cparams.causal_attn) &&
-        "causal attention is not supported by this model"
-    );
-
-    if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) {
-        // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
-        if (cparams.causal_attn && !lctx.is_encoding) {
-            const int64_t n_kv         = kv_self.n;
-            const int64_t n_tokens     = ubatch.n_tokens;
-            const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-            const int64_t n_seqs       = ubatch.n_seqs;
-
-
-            float * data     = nullptr;
-            float * data_swa = nullptr;
-
-            if (lctx.inp_KQ_mask) {
-                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
-                data = (float *) lctx.inp_KQ_mask->data;
-            }
-
-            if (lctx.inp_KQ_mask_swa) {
-                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer));
-                data_swa = (float *) lctx.inp_KQ_mask_swa->data;
-            }
-
-            // For causal attention, use only the previous KV cells
-            // of the correct sequence for each token of the ubatch.
-            // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
-            for (int h = 0; h < 1; ++h) {
-                for (int s = 0; s < n_seqs; ++s) {
-                    const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-                    for (int j = 0; j < n_seq_tokens; ++j) {
-                        const llama_pos pos = ubatch.pos[s*n_seq_tokens + j];
-
-                        for (int i = 0; i < n_kv; ++i) {
-                            float f;
-                            if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) {
-                                f = -INFINITY;
-                            } else {
-                                if (hparams.use_alibi) {
-                                    f = -std::abs(kv_self.cells[i].pos - pos);
-                                } else {
-                                    f = 0.0f;
-                                }
-                            }
-
-                            if (data) {
-                                data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
-                            }
-
-                            // may need to cut off old tokens for sliding window
-                            if (data_swa) {
-                                if (pos - kv_self.cells[i].pos >= (int32_t)hparams.n_swa) {
-                                    f = -INFINITY;
-                                }
-                                data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
-                            }
-                        }
-                    }
-                }
-
-                if (data) {
-                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                        for (int j = 0; j < n_kv; ++j) {
-                            data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
-                        }
-                    }
-                }
-
-                if (data_swa) {
-                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                        for (int j = 0; j < n_kv; ++j) {
-                            data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
-                        }
-                    }
-                }
-            }
-        } else {
-            const int64_t n_tokens     = ubatch.n_tokens;
-            const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-            const int64_t n_seqs       = ubatch.n_seqs;
-            // when using kv cache, the mask needs to match the kv cache size
-            const int64_t n_stride = hparams.causal_attn && !lctx.is_encoding ? kv_self.n : n_tokens;
-
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
-
-            float * data = (float *) lctx.inp_KQ_mask->data;
-
-            for (int h = 0; h < 1; ++h) {
-                for (int s1 = 0; s1 < n_seqs; ++s1) {
-                    const llama_seq_id seq_id = ubatch.seq_id[s1][0];
-
-                    for (int j = 0; j < n_seq_tokens; ++j) {
-                        const int32_t tj = s1*n_seq_tokens + j;
-
-                        for (int s0 = 0; s0 < n_seqs; ++s0) {
-                            for (int i = 0; i < n_seq_tokens; ++i) {
-                                const int32_t ti = s0*n_seq_tokens + i;
-                                float f = -INFINITY;
-
-                                for (int s = 0; s < ubatch.n_seq_id[s0]; ++s) {
-                                    if (ubatch.seq_id[s0][s] == seq_id) {
-                                        if (hparams.use_alibi) {
-                                            f = -std::abs(ubatch.pos[ti] - ubatch.pos[tj]);
-                                        } else {
-                                            f = 0.0f;
-                                        }
-                                        break;
-                                    }
-                                }
-
-                                data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
-                            }
-                        }
-
-                        for (int i = n_tokens; i < n_stride; ++i) {
-                            data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
-        const int64_t n_tokens     = ubatch.n_tokens;
-        const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-        const int64_t n_seqs       = ubatch.n_seqs;
-
-        GGML_ASSERT(lctx.inp_mean);
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
-
-        float * data = (float *) lctx.inp_mean->data;
-        memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
-
-        std::vector<uint64_t> sum(n_tokens, 0);
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true
-            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
-
-            sum[seq_id] += ubatch.n_seq_tokens;
-        }
-
-        std::vector<float> div(n_tokens, 0.0f);
-        for (int i = 0; i < n_tokens; ++i) {
-            const uint64_t s = sum[i];
-            if (s > 0) {
-                div[i] = 1.0f/float(s);
-            }
-        }
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            for (int i = 0; i < n_seq_tokens; ++i) {
-                data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id];
-            }
-        }
-    }
-
-    if (cparams.embeddings && (
-                cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
-                cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
-        const int64_t n_tokens     = ubatch.n_tokens;
-        const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-        const int64_t n_seqs       = ubatch.n_seqs;
-
-        GGML_ASSERT(lctx.inp_cls);
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
-
-        uint32_t * data = (uint32_t *) lctx.inp_cls->data;
-        memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true
-            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");
-
-            for (int i = 0; i < n_seq_tokens; ++i) {
-                const llama_pos pos = ubatch.pos[s*n_seq_tokens + i];
-
-                if (pos == 0) {
-                    data[seq_id] = s*n_seq_tokens + i;
-                }
-            }
-        }
-    }
-
-    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
-        const int64_t n_tokens     = ubatch.n_tokens;
-        const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-        const int64_t n_seqs       = ubatch.n_seqs;
-
-        GGML_ASSERT(lctx.inp_cls);
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
-
-        uint32_t * data = (uint32_t *) lctx.inp_cls->data;
-        memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
-
-        std::vector<int32_t> last_pos(n_tokens, -1);
-        std::vector<int32_t> last_row(n_tokens, -1);
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true
-            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");
-
-            for (int i = 0; i < n_seq_tokens; ++i) {
-                const llama_pos pos = ubatch.pos[s*n_seq_tokens + i];
-
-                if (pos >= last_pos[seq_id]) {
-                    last_pos[seq_id] = pos;
-                    last_row[seq_id] = s*n_seq_tokens + i;
-                }
-            }
-        }
-
-        for (int i = 0; i < n_tokens; ++i) {
-            if (last_row[i] >= 0) {
-                data[i] = last_row[i];
-            }
-        }
-    }
-
-    if (kv_self.recurrent) {
-        const int64_t n_kv = kv_self.n;
-
-        if (lctx.inp_s_mask) {
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer));
-            float * data = (float *) lctx.inp_s_mask->data;
-
-            // clear unused states
-            for (int i = 0; i < n_kv; ++i) {
-                const uint32_t  cell_id = i + kv_self.head;
-                llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id];
-
-                data[i] = (float) (kv_cell.src >= 0);
-
-                // only clear once
-                if (kv_cell.src < 0) {
-                    kv_cell.src = cell_id;
-                }
-            }
-        }
-
-        if (lctx.inp_s_copy) {
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
-            int32_t * data = (int32_t *) lctx.inp_s_copy->data;
-
-            // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
-            for (uint32_t i = 0; i < n_kv; ++i) {
-                const uint32_t  cell_id = i + kv_self.head;
-                llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id];
-
-                // prevent out-of-bound sources
-                if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self.size) {
-                    kv_cell.src = cell_id;
-                }
-
-                data[i] = kv_cell.src;
-
-                // ensure copy only happens once
-                if (kv_cell.src != (int32_t) cell_id) {
-                    kv_cell.src = cell_id;
-                }
-            }
-        }
-    }
-
-    if (lctx.inp_pos_bucket) {
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_pos_bucket->buffer));
-        GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing
-
-        int32_t * data = (int32_t *) lctx.inp_pos_bucket->data;
-
-        if (!lctx.is_encoding) {
-            const int64_t n_kv = kv_self.n;
-            for (int h = 0; h < 1; ++h) {
-                for (int j = 0; j < n_tokens; ++j) {
-                    for (int i = 0; i < n_kv; ++i) {
-                        data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(lctx.kv_self.cells[i].pos, ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding);
-                    }
-                }
-            }
-        } else {
-            for (int h = 0; h < 1; ++h) {
-                for (int j = 0; j < n_tokens; ++j) {
-                    for (int i = 0; i < n_tokens; ++i) {
-                        data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch.pos[i], ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding);
-                    }
-                }
-            }
-        }
-    }
-
-    if (!lctx.is_encoding && lctx.inp_embd_enc) {
-        assert(lctx.inp_embd_enc->type == GGML_TYPE_F32);
-        assert((size_t) ggml_nelements(lctx.inp_embd_enc) == lctx.embd_enc.size());
-
-        ggml_backend_tensor_set(lctx.inp_embd_enc, lctx.embd_enc.data(), 0, ggml_nbytes(lctx.inp_embd_enc));
-    }
-
-    if (!lctx.is_encoding && lctx.inp_KQ_mask_cross) {
-        const int64_t n_output_enc = lctx.embd_enc.size() / hparams.n_embd;
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_cross->buffer));
-        GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing
-
-        float * data = (float *) lctx.inp_KQ_mask_cross->data;
-
-        for (int h = 0; h < 1; ++h) {
-            for (int j = 0; j < n_tokens; ++j) {
-                for (int i = 0; i < n_output_enc; ++i) {
-                    float f = -INFINITY;
-                    for (int s = 0; s < ubatch.n_seq_id[j]; ++s) {
-                        const llama_seq_id seq_id = ubatch.seq_id[j][s];
-                        if (lctx.seq_ids_enc[i].find(seq_id) != lctx.seq_ids_enc[i].end()) {
-                            f = 0.0f;
-                        }
-                    }
-                    data[h*(n_output_enc*n_tokens) + j*n_output_enc + i] = f;
-                }
-            }
-
-            for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                for (int j = 0; j < n_output_enc; ++j) {
-                    data[h*(n_output_enc*n_tokens) + i*n_output_enc + j] = -INFINITY;
-                }
-            }
-        }
-    }
-}
-
-// Make sure enough space is available for outputs.
-// Returns max number of outputs for which space was reserved.
-static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
-    const auto & cparams = lctx.cparams;
-    const auto & hparams = lctx.model.hparams;
-
-    const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max);
-
-    const auto n_batch = cparams.n_batch;
-    const auto n_vocab = hparams.n_vocab;
-    const auto n_embd  = hparams.n_embd;
-
-    // TODO: use a per-batch flag for logits presence instead
-    const bool has_logits = !cparams.embeddings;
-    const bool has_embd   =  cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
-
-    const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
-    const size_t embd_size   = has_embd   ?  n_embd*n_outputs_max : 0;
-
-    if (lctx.output_ids.empty()) {
-        // init, never resized afterwards
-        lctx.output_ids.resize(n_batch);
-    }
-
-    const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output.get()) : 0;
-    const size_t new_size  = (logits_size + embd_size) * sizeof(float);
-
-    // alloc only when more than the current capacity is required
-    // TODO: also consider shrinking the buffer
-    if (!lctx.buf_output || prev_size < new_size) {
-        if (lctx.buf_output) {
-#ifndef NDEBUG
-            // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
-            LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
-#endif
-            lctx.buf_output = nullptr;
-            lctx.logits = nullptr;
-            lctx.embd = nullptr;
-        }
-
-        auto * buft = ggml_backend_cpu_buffer_type();
-        // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory
-        auto * output_dev = lctx.model.dev_output.dev;
-        auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
-        if (output_dev_host_buft) {
-            buft = output_dev_host_buft;
-        }
-        lctx.buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size));
-        if (lctx.buf_output == nullptr) {
-            LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
-            return 0;
-        }
-    }
-
-    float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output.get());
-
-    lctx.logits = has_logits ? output_base               : nullptr;
-    lctx.embd   = has_embd   ? output_base + logits_size : nullptr;
-
-    lctx.output_size = n_outputs_max;
-    lctx.logits_size = logits_size;
-    lctx.embd_size   = embd_size;
-
-    // set all ids as invalid (negative)
-    std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1);
-
-    ggml_backend_buffer_clear(lctx.buf_output.get(), 0);
-
-    lctx.n_outputs = 0;
-
-    return n_outputs_max;
-}
-
-// make the outputs have the same order they had in the user-provided batch
-static void llama_output_reorder(struct llama_context * ctx) {
-    std::vector<size_t> & out_ids = ctx->sbatch.out_ids;
-    if (!out_ids.empty()) {
-        uint32_t n_vocab = ctx->model.hparams.n_vocab;
-        uint32_t n_embd  = ctx->model.hparams.n_embd;
-        int32_t n_outputs = ctx->n_outputs;
-        GGML_ASSERT((size_t) n_outputs == out_ids.size());
-        // TODO: is there something more efficient which also minimizes swaps?
-        // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
-        for (int32_t i = 0; i < n_outputs - 1; ++i) {
-            int32_t j_min = i;
-            for (int32_t j = i + 1; j < n_outputs; ++j) {
-                if (out_ids[j] < out_ids[j_min]) {
-                    j_min = j;
-                }
-            }
-            if (j_min == i) { continue; }
-            std::swap(out_ids[i], out_ids[j_min]);
-            if (ctx->logits_size > 0) {
-                for (uint32_t k = 0; k < n_vocab; k++) {
-                    std::swap(ctx->logits[i*n_vocab + k], ctx->logits[j_min*n_vocab + k]);
-                }
-            }
-            if (ctx->embd_size > 0) {
-                for (uint32_t k = 0; k < n_embd; k++) {
-                    std::swap(ctx->embd[i*n_embd + k], ctx->embd[j_min*n_embd + k]);
-                }
-            }
-        }
-        std::fill(ctx->output_ids.begin(), ctx->output_ids.end(), -1);
-        for (int32_t i = 0; i < n_outputs; ++i) {
-            ctx->output_ids[out_ids[i]] = i;
-        }
-        out_ids.clear();
-    }
-}
-
 // returns the result of ggml_backend_sched_graph_compute_async execution
 static enum ggml_status llama_graph_compute(
           llama_context & lctx,
@@ -18513,7 +10529,8 @@ static int llama_decode_internal(
     }
 
     // temporary allocate memory for the input batch if needed
-    llama_batch_allocr batch_allocr(lctx, inp_batch);
+    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1);
+
     const llama_batch & batch = batch_allocr.batch;
     const uint32_t n_tokens_all = batch.n_tokens;
 
@@ -18847,7 +10864,8 @@ static int llama_encode_internal(
     }
 
     // temporary allocate memory for the input batch if needed
-    llama_batch_allocr batch_allocr(lctx, inp_batch);
+    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1);
+
     const llama_batch & batch = batch_allocr.batch;
     const uint32_t n_tokens = batch.n_tokens;
 
@@ -19297,1046 +11315,6 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
     }
 }
 
-//
-// quantization
-//
-
-struct quantize_state_internal {
-    const llama_model                 & model;
-    const llama_model_quantize_params * params;
-
-    int n_attention_wv    = 0;
-    int n_ffn_down        = 0;
-    int n_ffn_gate        = 0;
-    int n_ffn_up          = 0;
-    int i_attention_wv    = 0;
-    int i_ffn_down        = 0;
-    int i_ffn_gate        = 0;
-    int i_ffn_up          = 0;
-
-    int n_k_quantized     = 0;
-    int n_fallback        = 0;
-
-    bool has_imatrix      = false;
-
-    // used to figure out if a model shares tok_embd with the output weight
-    bool has_output       = false;
-
-    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
-        : model(model)
-        , params(params)
-        {}
-};
-
-static void llama_tensor_dequantize_internal(
-    struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
-    const size_t nelements, const int nthread
-) {
-    if (output.size() < nelements) {
-        output.resize(nelements);
-    }
-    float * f32_output = (float *) output.data();
-
-    const ggml_type_traits * qtype = ggml_get_type_traits(tensor->type);
-    if (ggml_is_quantized(tensor->type)) {
-        if (qtype->to_float == NULL) {
-            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
-        }
-    } else if (tensor->type != GGML_TYPE_F16 &&
-               tensor->type != GGML_TYPE_BF16) {
-        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
-    }
-
-    if (nthread < 2) {
-        if (tensor->type == GGML_TYPE_F16) {
-            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
-        } else if (tensor->type == GGML_TYPE_BF16) {
-            ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
-        } else if (ggml_is_quantized(tensor->type)) {
-            qtype->to_float(tensor->data, f32_output, nelements);
-        } else {
-            GGML_ABORT("fatal error"); // unreachable
-        }
-        return;
-    }
-
-    size_t block_size;
-    if (tensor->type == GGML_TYPE_F16 ||
-        tensor->type == GGML_TYPE_BF16) {
-        block_size = 1;
-    } else {
-        block_size = (size_t)ggml_blck_size(tensor->type);
-    }
-
-    size_t block_size_bytes = ggml_type_size(tensor->type);
-
-    GGML_ASSERT(nelements % block_size == 0);
-    size_t nblocks = nelements / block_size;
-    size_t blocks_per_thread = nblocks / nthread;
-    size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
-
-    size_t in_buff_offs = 0;
-    size_t out_buff_offs = 0;
-
-    for (int tnum = 0; tnum < nthread; tnum++) {
-        size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
-        size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
-        size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
-
-        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
-            if (typ == GGML_TYPE_F16) {
-                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
-            } else if (typ == GGML_TYPE_BF16) {
-                ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
-            } else {
-                qtype->to_float(inbuf, outbuf, nels);
-            }
-        };
-        workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
-        in_buff_offs += thr_block_bytes;
-        out_buff_offs += thr_elems;
-    }
-    for (auto & w : workers) { w.join(); }
-    workers.clear();
-}
-
-static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
-    const std::string name = ggml_get_name(tensor);
-
-    // TODO: avoid hardcoded tensor names - use the TN_* constants
-    const llm_arch arch = qs.model.arch;
-    const auto       tn = LLM_TN(arch);
-
-    auto use_more_bits = [](int i_layer, int n_layers) -> bool {
-        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
-    };
-    const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
-    auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
-        if (n_expert > 1) {
-            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
-            // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
-            // for getting the current layer as I initially thought, and we need to resort to parsing the
-            // tensor name.
-            if (sscanf(name, "blk.%d.", &i_layer) != 1) {
-                throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
-            }
-            if (i_layer < 0 || i_layer >= n_layer) {
-                throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
-            }
-        }
-        return std::make_pair(i_layer, n_layer);
-    };
-
-    // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
-    // with the quantization of the output tensor
-    if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
-        if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
-            new_type = qs.params->output_tensor_type;
-        } else {
-            int nx = tensor->ne[0];
-            if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
-                new_type = GGML_TYPE_Q8_0;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
-                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M   ||
-                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-                new_type = GGML_TYPE_Q5_K;
-            }
-            else if (new_type != GGML_TYPE_Q8_0) {
-                new_type = GGML_TYPE_Q6_K;
-            }
-        }
-    } else if (name == "token_embd.weight") {
-        if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
-            new_type = qs.params->token_embedding_type;
-        } else {
-            if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
-                ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-                new_type = GGML_TYPE_Q2_K;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
-                new_type = GGML_TYPE_IQ3_S;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-                new_type = GGML_TYPE_IQ3_S;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) {
-                new_type = GGML_TYPE_Q4_K;
-            }
-        }
-    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
-               ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M    || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-        if (name.find("attn_v.weight") != std::string::npos) {
-            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
-            else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
-            ++qs.i_attention_wv;
-        }
-        else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (name.find("ffn_down") != std::string::npos) {
-            if (qs.i_ffn_down < qs.n_ffn_down/8) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
-            }
-            ++qs.i_ffn_down;
-        }
-        else if (name.find("attn_output.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert == 8) {
-                new_type = GGML_TYPE_Q5_K;
-            } else {
-                if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
-            }
-        }
-    } else if (name.find("attn_v.weight") != std::string::npos) {
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
-            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
-        }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
-            new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
-            new_type = GGML_TYPE_Q5_K;
-        }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
-                use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
-        if (qs.model.type == MODEL_70B) {
-            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
-            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
-            // nearly negligible increase in model size by quantizing this tensor with more bits:
-            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
-        }
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
-        ++qs.i_attention_wv;
-    } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = GGML_TYPE_IQ2_S;
-        }
-    } else if (name.find("attn_q.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = GGML_TYPE_IQ2_S;
-        }
-    } else if (name.find("ffn_down") != std::string::npos) {
-        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
-        int i_layer = info.first, n_layer = info.second;
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
-            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
-            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
-            new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
-                     : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
-                     : GGML_TYPE_Q3_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
-                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
-            new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
-            if (arch == LLM_ARCH_FALCON) {
-                new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
-                           use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
-            } else {
-                if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
-            }
-        }
-        else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
-            new_type = GGML_TYPE_Q5_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
-            new_type = GGML_TYPE_Q5_K;
-        }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
-                && qs.has_imatrix && i_layer < n_layer/8) {
-            // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
-            // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
-            // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
-            new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
-        }
-        ++qs.i_ffn_down;
-    } else if (name.find("attn_output.weight") != std::string::npos) {
-        if (arch != LLM_ARCH_FALCON) {
-            if (qs.model.hparams.n_expert == 8) {
-                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
-                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL  ||
-                    ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S  ||
-                    ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
-                    new_type = GGML_TYPE_Q5_K;
-                }
-            } else {
-                if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   ) new_type = GGML_TYPE_Q3_K;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  ) new_type = GGML_TYPE_Q4_K;
-            }
-        } else {
-            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
-        }
-    }
-    else if (name.find("attn_qkv.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
-    }
-    else if (name.find("ffn_gate") != std::string::npos) {
-        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
-        int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        ++qs.i_ffn_gate;
-    }
-    else if (name.find("ffn_up") != std::string::npos) {
-        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
-        int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        ++qs.i_ffn_up;
-    }
-
-    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
-    //}
-    // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
-    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
-    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
-    //}
-    // This can be used to reduce the size of the Q5_K_S model.
-    // The associated PPL increase is fully in line with the size reduction
-    //else {
-    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
-    //}
-    bool convert_incompatible_tensor = false;
-    if (new_type == GGML_TYPE_Q2_K    || new_type == GGML_TYPE_Q3_K    || new_type == GGML_TYPE_Q4_K   ||
-        new_type == GGML_TYPE_Q5_K    || new_type == GGML_TYPE_Q6_K    || new_type == GGML_TYPE_IQ4_XS ||
-        new_type == GGML_TYPE_IQ2_XS  || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S  ||
-        new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S   || new_type == GGML_TYPE_IQ3_S  ||
-        new_type == GGML_TYPE_IQ1_M) {
-        int nx = tensor->ne[0];
-        int ny = tensor->ne[1];
-        if (nx % QK_K != 0) {
-            LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
-            convert_incompatible_tensor = true;
-        } else {
-            ++qs.n_k_quantized;
-        }
-    }
-    if (convert_incompatible_tensor) {
-        switch (new_type) {
-            case GGML_TYPE_TQ1_0:
-            case GGML_TYPE_TQ2_0:  new_type = GGML_TYPE_Q4_0; break;  // TODO: use a symmetric type instead
-            case GGML_TYPE_IQ2_XXS:
-            case GGML_TYPE_IQ2_XS:
-            case GGML_TYPE_IQ2_S:
-            case GGML_TYPE_IQ3_XXS:
-            case GGML_TYPE_IQ3_S:
-            case GGML_TYPE_IQ1_S:
-            case GGML_TYPE_IQ1_M:
-            case GGML_TYPE_Q2_K:
-            case GGML_TYPE_Q3_K:
-            case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
-            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
-            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
-            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
-            default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
-        }
-        if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
-            new_type = GGML_TYPE_F16;
-        }
-        LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
-        ++qs.n_fallback;
-    }
-
-    return new_type;
-}
-
-static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
-    if (nthread < 2) {
-        // single-thread
-        size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
-        if (!ggml_validate_row_data(new_type, new_data, new_size)) {
-            throw std::runtime_error("quantized data validation failed");
-        }
-        return new_size;
-    }
-
-    std::mutex mutex;
-    int64_t counter = 0;
-    size_t new_size = 0;
-    bool valid = true;
-    auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
-            nrows, n_per_row, imatrix]() {
-        const int64_t nrows_per_chunk = chunk_size / n_per_row;
-        size_t local_size = 0;
-        while (true) {
-            std::unique_lock lock(mutex);
-            int64_t first_row = counter; counter += nrows_per_chunk;
-            if (first_row >= nrows) {
-                if (local_size > 0) {
-                    new_size += local_size;
-                }
-                break;
-            }
-            lock.unlock();
-            const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
-            size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
-            local_size += this_size;
-
-            // validate the quantized data
-            const size_t row_size  = ggml_row_size(new_type, n_per_row);
-            void * this_data = (char *) new_data + first_row * row_size;
-            if (!ggml_validate_row_data(new_type, this_data, this_size)) {
-                std::unique_lock lock(mutex);
-                valid = false;
-                break;
-            }
-        }
-    };
-    for (int it = 0; it < nthread - 1; ++it) {
-        workers.emplace_back(compute);
-    }
-    compute();
-    for (auto & w : workers) { w.join(); }
-    workers.clear();
-    if (!valid) {
-        throw std::runtime_error("quantized data validation failed");
-    }
-    return new_size;
-}
-
-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
-    ggml_type default_type;
-    llama_ftype ftype = params->ftype;
-
-    switch (params->ftype) {
-        case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
-        case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
-        case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
-        case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
-        case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
-        case LLAMA_FTYPE_MOSTLY_F16:  default_type = GGML_TYPE_F16;  break;
-        case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
-        case LLAMA_FTYPE_ALL_F32:     default_type = GGML_TYPE_F32;  break;
-
-        // K-quants
-        case LLAMA_FTYPE_MOSTLY_Q2_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q2_K:    default_type = GGML_TYPE_Q2_K;    break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_XS:  default_type = GGML_TYPE_IQ3_S;   break;
-        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
-        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  default_type = GGML_TYPE_Q3_K;    break;
-        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  default_type = GGML_TYPE_Q4_K;    break;
-        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  default_type = GGML_TYPE_Q5_K;    break;
-        case LLAMA_FTYPE_MOSTLY_Q6_K:    default_type = GGML_TYPE_Q6_K;    break;
-        case LLAMA_FTYPE_MOSTLY_TQ1_0:   default_type = GGML_TYPE_TQ1_0;   break;
-        case LLAMA_FTYPE_MOSTLY_TQ2_0:   default_type = GGML_TYPE_TQ2_0;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  default_type = GGML_TYPE_IQ2_XS;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_S:   default_type = GGML_TYPE_IQ2_XS;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_M:   default_type = GGML_TYPE_IQ2_S;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
-        case LLAMA_FTYPE_MOSTLY_IQ1_S:   default_type = GGML_TYPE_IQ1_S;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ1_M:   default_type = GGML_TYPE_IQ1_M;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ4_NL:  default_type = GGML_TYPE_IQ4_NL;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  default_type = GGML_TYPE_IQ4_XS;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_S:   default_type = GGML_TYPE_IQ3_S;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_M:   default_type = GGML_TYPE_IQ3_S;   break;
-
-        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
-    }
-
-    int nthread = params->nthread;
-
-    if (nthread <= 0) {
-        nthread = std::thread::hardware_concurrency();
-    }
-
-    // mmap consistently increases speed on Linux, and also increases speed on Windows with
-    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
-#if defined(__linux__) || defined(_WIN32)
-    constexpr bool use_mmap = true;
-#else
-    constexpr bool use_mmap = false;
-#endif
-
-    llama_model_kv_override * kv_overrides = nullptr;
-    if (params->kv_overrides) {
-        auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
-        kv_overrides = v->data();
-    }
-    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
-    ml.init_mappings(false); // no prefetching
-
-    llama_model model;
-    llm_load_arch(ml, model);
-    llm_load_hparams(ml, model);
-    llm_load_stats(ml, model);
-
-    struct quantize_state_internal qs(model, params);
-
-    if (params->only_copy) {
-        ftype = model.ftype;
-    }
-    const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
-    if (params->imatrix) {
-        imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
-        if (imatrix_data) {
-            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
-            qs.has_imatrix = true;
-            // check imatrix for nans or infs
-            for (const auto & kv : *imatrix_data) {
-                for (float f : kv.second) {
-                    if (!std::isfinite(f)) {
-                        throw std::runtime_error(format("imatrix contains non-finite value %f\n", f));
-                    }
-                }
-            }
-        }
-    }
-
-    const size_t align = GGUF_DEFAULT_ALIGNMENT;
-    gguf_context_ptr ctx_out { gguf_init_empty() };
-
-    // copy the KV pairs from the input file
-    gguf_set_kv     (ctx_out.get(), ml.meta.get());
-    gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
-    gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
-
-    // Remove split metadata
-    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
-    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
-    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());
-
-    if (params->kv_overrides) {
-        const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
-        for (const auto & o : overrides) {
-            if (o.key[0] == 0) break;
-            if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
-                gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
-            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
-                gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64);
-            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
-                gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
-            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
-                gguf_set_val_str(ctx_out.get(), o.key, o.val_str);
-            } else {
-                LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
-            }
-        }
-    }
-
-    // make a list of weights
-    std::vector<const llama_model_loader::llama_tensor_weight *> tensors;
-    tensors.reserve(ml.weights_map.size());
-    for (const auto & it : ml.weights_map) {
-        tensors.push_back(&it.second);
-    }
-
-    // keep_split requires that the weights are sorted by split index
-    if (params->keep_split) {
-        std::sort(tensors.begin(), tensors.end(), [](const llama_model_loader::llama_tensor_weight * a, const llama_model_loader::llama_tensor_weight * b) {
-            if (a->idx == b->idx) {
-                return a->offs < b->offs;
-            }
-            return a->idx < b->idx;
-        });
-    }
-
-    for (const auto * it : tensors) {
-        const struct ggml_tensor * tensor = it->tensor;
-
-        const std::string name = ggml_get_name(tensor);
-
-        // TODO: avoid hardcoded tensor names - use the TN_* constants
-        if (name.find("attn_v.weight")   != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos ||
-            name.find("attn_kv_b.weight")!= std::string::npos) {
-            ++qs.n_attention_wv;
-        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
-            qs.has_output = true;
-        }
-    }
-
-    qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
-
-    // sanity checks
-    {
-        const auto & n_head_kv_iter = model.hparams.n_head_kv_arr.begin();
-        // attention layers have a non-zero number of kv heads
-        int32_t n_attn_layer = model.hparams.n_layer - std::count(n_head_kv_iter, n_head_kv_iter + model.hparams.n_layer, 0);
-        if (llama_model_has_encoder(&model)) {
-            n_attn_layer *= 3;
-        }
-        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
-    }
-
-    size_t total_size_org = 0;
-    size_t total_size_new = 0;
-
-    std::vector<std::thread> workers;
-    workers.reserve(nthread);
-
-    int idx = 0;
-
-    std::vector<no_init<uint8_t>> read_data;
-    std::vector<no_init<uint8_t>> work;
-    std::vector<no_init<float>> f32_conv_buf;
-
-    uint16_t n_split = 1;
-
-    // Assume split index is continuous
-    if (params->keep_split) {
-        for (const auto * it : tensors) {
-            n_split = std::max(uint16_t(it->idx + 1), n_split);
-        }
-    }
-    std::vector<gguf_context_ptr> ctx_outs(n_split);
-    ctx_outs[0] = std::move(ctx_out);
-
-    // populate the original tensors so we get the initial metadata
-    for (const auto * it : tensors) {
-        uint16_t i_split = params->keep_split ? it->idx : 0;
-        struct ggml_tensor * tensor = it->tensor;
-        if (!ctx_outs[i_split]) {
-            ctx_outs[i_split].reset(gguf_init_empty());
-        }
-        gguf_add_tensor(ctx_outs[i_split].get(), tensor);
-    }
-
-    // Set split info if needed
-    if (n_split > 1) {
-        for (size_t i = 0; i < ctx_outs.size(); ++i) {
-            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
-            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
-            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
-        }
-    }
-
-    int cur_split = -1;
-    std::ofstream fout;
-    auto close_ofstream = [&]() {
-        // Write metadata and close file handler
-        if (fout.is_open()) {
-            fout.seekp(0);
-            std::vector<char> data(gguf_get_meta_size(ctx_outs[cur_split].get()));
-            gguf_get_meta_data(ctx_outs[cur_split].get(), data.data());
-            fout.write((const char *) data.data(), data.size());
-            fout.close();
-        }
-    };
-    auto new_ofstream = [&](int index) {
-        cur_split = index;
-        GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
-        std::string fname = fname_out;
-        if (params->keep_split) {
-            char split_path[PATH_MAX] = {0};
-            llama_split_path(split_path, sizeof(split_path), fname_out.c_str(), cur_split, n_split);
-            fname = std::string(split_path);
-        }
-
-        fout = std::ofstream(fname, std::ios::binary);
-        fout.exceptions(std::ofstream::failbit); // fail fast on write errors
-        const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split].get());
-        // placeholder for the meta data
-        ::zeros(fout, meta_size);
-    };
-
-    const auto tn = LLM_TN(model.arch);
-    new_ofstream(0);
-    for (const auto * it : tensors) {
-        const auto & weight = *it;
-        struct ggml_tensor * tensor = weight.tensor;
-        if (weight.idx != cur_split && params->keep_split) {
-            close_ofstream();
-            new_ofstream(weight.idx);
-        }
-
-        const std::string name = ggml_get_name(tensor);
-
-        if (!ml.use_mmap) {
-            if (read_data.size() < ggml_nbytes(tensor)) {
-                read_data.resize(ggml_nbytes(tensor));
-            }
-            tensor->data = read_data.data();
-        }
-        ml.load_data_for(tensor);
-
-        LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
-               ++idx, ml.n_tensors,
-               ggml_get_name(tensor),
-               llama_format_tensor_shape(tensor).c_str(),
-               ggml_type_name(tensor->type));
-
-        // This used to be a regex, but <regex> has an extreme cost to compile times.
-        bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
-
-        // quantize only 2D and 3D tensors (experts)
-        quantize &= (ggml_n_dims(tensor) >= 2);
-
-        // do not quantize norm tensors
-        quantize &= name.find("_norm.weight") == std::string::npos;
-
-        quantize &= params->quantize_output_tensor || name != "output.weight";
-        quantize &= !params->only_copy;
-
-        // do not quantize expert gating tensors
-        // NOTE: can't use LLM_TN here because the layer number is not known
-        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
-
-        // do not quantize positional embeddings and token types (BERT)
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
-
-        // do not quantize Mamba's small yet 2D weights
-        // NOTE: can't use LLM_TN here because the layer number is not known
-        quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
-
-        // do not quantize RWKV's time_mix_first tensors
-        quantize &= name.find("time_mix_first.weight") == std::string::npos;
-        quantize &= name.find("time_mix_w1.weight") == std::string::npos;
-        quantize &= name.find("time_mix_w2.weight") == std::string::npos;
-        quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos;
-        quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos;
-
-        // do not quantize relative position bias (T5)
-        quantize &= name.find("attn_rel_b.weight") == std::string::npos;
-
-        enum ggml_type new_type;
-        void * new_data;
-        size_t new_size;
-
-        if (quantize) {
-            new_type = default_type;
-
-            // get more optimal quantization type based on the tensor shape, layer, etc.
-            if (!params->pure && ggml_is_quantized(default_type)) {
-                new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
-            }
-            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
-                new_type = params->token_embedding_type;
-            }
-            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
-                new_type = params->output_tensor_type;
-            }
-
-            // If we've decided to quantize to the same type the tensor is already
-            // in then there's nothing to do.
-            quantize = tensor->type != new_type;
-        }
-
-        if (!quantize) {
-            new_type = tensor->type;
-            new_data = tensor->data;
-            new_size = ggml_nbytes(tensor);
-            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
-        } else {
-            const int64_t nelements = ggml_nelements(tensor);
-
-            const float * imatrix = nullptr;
-            if (imatrix_data) {
-                auto it = imatrix_data->find(tensor->name);
-                if (it == imatrix_data->end()) {
-                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
-                } else {
-                    if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
-                        imatrix = it->second.data();
-                    } else {
-                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
-                                int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
-
-                        // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
-                        // this is a significant error and it may be a good idea to abort the process if this happens,
-                        // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
-                        // tok_embd should be ignored in this case, since it always causes this warning
-                        if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
-                            throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
-                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
-                        }
-                    }
-                }
-            }
-            if ((new_type == GGML_TYPE_IQ2_XXS ||
-                 new_type == GGML_TYPE_IQ2_XS  ||
-                 new_type == GGML_TYPE_IQ2_S   ||
-                 new_type == GGML_TYPE_IQ1_S   ||
-                (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight"))  ||
-                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
-                LLAMA_LOG_ERROR("\n\n============================================================\n");
-                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
-                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
-                LLAMA_LOG_ERROR("============================================================\n\n");
-                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
-            }
-
-            float * f32_data;
-
-            if (tensor->type == GGML_TYPE_F32) {
-                f32_data = (float *) tensor->data;
-            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
-                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
-            } else {
-                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
-                f32_data = (float *) f32_conv_buf.data();
-            }
-
-            LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
-            fflush(stdout);
-
-            if (work.size() < (size_t)nelements * 4) {
-                work.resize(nelements * 4); // upper bound on size
-            }
-            new_data = work.data();
-
-            const int64_t n_per_row = tensor->ne[0];
-            const int64_t nrows = tensor->ne[1];
-
-            static const int64_t min_chunk_size = 32 * 512;
-            const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row));
-
-            const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
-            const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
-            const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
-
-            // quantize each expert separately since they have different importance matrices
-            new_size = 0;
-            for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
-                const float * f32_data_03 = f32_data + i03 * nelements_matrix;
-                void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
-                const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
-
-                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
-            }
-            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
-        }
-        total_size_org += ggml_nbytes(tensor);
-        total_size_new += new_size;
-
-        // update the gguf meta data as we go
-        gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
-        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data, new_size);
-
-        // write tensor data + padding
-        fout.write((const char *) new_data, new_size);
-        zeros(fout, GGML_PAD(new_size, align) - new_size);
-    }
-    close_ofstream();
-
-    LLAMA_LOG_INFO("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-    LLAMA_LOG_INFO("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-    if (qs.n_fallback > 0) {
-        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
-                __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
-    }
-}
-
-static void llama_lora_adapter_init_internal(struct llama_model * model, const char * path_lora, struct llama_lora_adapter & adapter) {
-    LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
-
-    ggml_context * ctx_init;
-    struct gguf_init_params meta_gguf_params = {
-        /* .no_alloc = */ true,
-        /* .ctx      = */ &ctx_init,
-    };
-
-    gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
-    if (!ctx_gguf) {
-        throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
-    }
-
-    ggml_context_ptr ctx { ctx_init };
-
-    // check metadata
-    {
-        auto get_kv_str = [&](const std::string & key) -> std::string {
-            int id = gguf_find_key(ctx_gguf.get(), key.c_str());
-            return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id));
-        };
-        auto get_kv_f32 = [&](const std::string & key) -> float {
-            int id = gguf_find_key(ctx_gguf.get(), key.c_str());
-            return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf.get(), id);
-        };
-        LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
-
-        auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
-        if (general_type != "adapter") {
-            throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
-        }
-
-        auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
-        auto general_arch = llm_arch_from_string(general_arch_str);
-        if (general_arch != model->arch) {
-            throw std::runtime_error("model arch and LoRA arch mismatch");
-        }
-
-        auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
-        if (adapter_type != "lora") {
-            throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
-        }
-
-        adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA));
-    }
-
-    int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
-
-    // contexts for each buffer type
-    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            // add a new context
-            struct ggml_init_params params = {
-                /*.mem_size   =*/ n_tensors*ggml_tensor_overhead(),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * buft_ctx = ggml_init(params);
-            if (!buft_ctx) {
-                return nullptr;
-            }
-            ctx_map[buft] = buft_ctx;
-            adapter.ctxs.emplace_back(buft_ctx);
-            return buft_ctx;
-        };
-        return it->second;
-    };
-
-    // bundle lora_a and lora_b into pairs
-    std::map<std::string, llama_lora_weight> ab_map;
-    auto str_endswith = [](const std::string & str, const std::string & suffix) {
-        return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
-    };
-    for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
-        std::string name(cur->name);
-        if (str_endswith(name, ".lora_a")) {
-            replace_all(name, ".lora_a", "");
-            if (ab_map.find(name) == ab_map.end()) {
-                ab_map[name] = llama_lora_weight(cur, nullptr);
-            } else {
-                ab_map[name].a = cur;
-            }
-        } else if (str_endswith(name, ".lora_b")) {
-            replace_all(name, ".lora_b", "");
-            if (ab_map.find(name) == ab_map.end()) {
-                ab_map[name] = llama_lora_weight(nullptr, cur);
-            } else {
-                ab_map[name].b = cur;
-            }
-        } else {
-            throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
-        }
-    }
-
-    // add tensors
-    for (auto & it : ab_map) {
-        const std::string & name = it.first;
-        llama_lora_weight & w = it.second;
-
-        if (!w.a || !w.b) {
-            throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
-        }
-
-        // device buft and device ctx
-        auto * model_tensor = llama_get_model_tensor(model, name.c_str());
-        if (!model_tensor) {
-            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model");
-        }
-        struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
-        // validate tensor shape
-        if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
-            throw std::runtime_error("tensor '" + name + "' has incorrect shape");
-        }
-        if (w.a->ne[1] != w.b->ne[0]) {
-            throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
-        }
-        // save tensor to adapter
-        struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
-        struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
-        ggml_set_name(tensor_a, w.a->name);
-        ggml_set_name(tensor_b, w.b->name);
-        adapter.ab_map[name] = llama_lora_weight(tensor_a, tensor_b);
-    }
-
-    // allocate tensors / buffers and zero
-    {
-        adapter.ctxs.reserve(ctx_map.size());
-        adapter.bufs.reserve(ctx_map.size());
-        for (auto & it : ctx_map) {
-            ggml_backend_buffer_type_t buft = it.first;
-            ggml_context * ctx_dev = it.second;
-            ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
-            if (!buf) {
-                throw std::runtime_error("failed to allocate buffer for lora adapter\n");
-            }
-            LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
-            adapter.bufs.emplace_back(std::move(buf));
-        }
-    }
-
-    // set tensor data
-    {
-        llama_file gguf_file(path_lora, "rb");
-        std::vector<uint8_t> read_buf;
-        auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) {
-            size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
-            size_t size = ggml_nbytes(orig);
-            read_buf.resize(size);
-            gguf_file.seek(offs, SEEK_SET);
-            gguf_file.read_raw(read_buf.data(), size);
-            ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
-        };
-        for (auto & it : adapter.ab_map) {
-            auto orig = ab_map[it.first];
-            auto dev  = it.second;
-            set_tensor(orig.a, dev.a);
-            set_tensor(orig.b, dev.b);
-        }
-    }
-
-    LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
-}
-
 int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
@@ -20345,7 +11323,9 @@ int32_t llama_lora_adapter_set(
         LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
         return -1;
     }
+
     ctx->lora_adapters[adapter] = scale;
+
     return 0;
 }
 
@@ -20357,6 +11337,7 @@ int32_t llama_lora_adapter_remove(
         ctx->lora_adapters.erase(pos);
         return 0;
     }
+
     return -1;
 }
 
@@ -20364,37 +11345,20 @@ void llama_lora_adapter_clear(struct llama_context * ctx) {
     ctx->lora_adapters.clear();
 }
 
-void llama_lora_adapter_free(struct llama_lora_adapter * adapter) {
-    delete adapter;
+// TODO: tmp
+int32_t llama_control_vector_apply(
+        struct llama_context * lctx,
+                 const float * data,
+                      size_t   len,
+                     int32_t   n_embd,
+                     int32_t   il_start,
+                     int32_t   il_end) {
+    return llama_control_vector_apply(lctx->cvec, lctx->model, data, len, n_embd, il_start, il_end);
 }
 
 //
 // interface implementation
 //
-struct llama_model_params llama_model_default_params() {
-    struct llama_model_params result = {
-        /*.devices                     =*/ nullptr,
-        /*.n_gpu_layers                =*/ 0,
-        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
-        /*.main_gpu                    =*/ 0,
-        /*.tensor_split                =*/ nullptr,
-        /*.rpc_servers                 =*/ nullptr,
-        /*.progress_callback           =*/ nullptr,
-        /*.progress_callback_user_data =*/ nullptr,
-        /*.kv_overrides                =*/ nullptr,
-        /*.vocab_only                  =*/ false,
-        /*.use_mmap                    =*/ true,
-        /*.use_mlock                   =*/ false,
-        /*.check_tensors               =*/ false,
-    };
-
-#ifdef GGML_USE_METAL
-    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
-    result.n_gpu_layers = 999;
-#endif
-
-    return result;
-}
 
 struct llama_context_params llama_context_default_params() {
     struct llama_context_params result = {
@@ -20439,24 +11403,6 @@ struct llama_sampler_chain_params llama_sampler_chain_default_params() {
     return result;
 }
 
-struct llama_model_quantize_params llama_model_quantize_default_params() {
-    struct llama_model_quantize_params result = {
-        /*.nthread                     =*/ 0,
-        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
-        /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
-        /*.token_embedding_type        =*/ GGML_TYPE_COUNT,
-        /*.allow_requantize            =*/ false,
-        /*.quantize_output_tensor      =*/ true,
-        /*.only_copy                   =*/ false,
-        /*.pure                        =*/ false,
-        /*.keep_split                  =*/ false,
-        /*.imatrix                     =*/ nullptr,
-        /*.kv_overrides                =*/ nullptr,
-    };
-
-    return result;
-}
-
 size_t llama_max_devices(void) {
     return 16;
 }
@@ -20499,19 +11445,6 @@ void llama_numa_init(enum ggml_numa_strategy numa) {
     }
 }
 
-void llama_attach_threadpool(
-             struct llama_context * ctx,
-        ggml_threadpool_t   threadpool,
-        ggml_threadpool_t   threadpool_batch) {
-    ctx->threadpool       = threadpool;
-    ctx->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
-}
-
-void llama_detach_threadpool(struct llama_context * ctx) {
-    ctx->threadpool       = nullptr;
-    ctx->threadpool_batch = nullptr;
-}
-
 void llama_backend_free(void) {
     ggml_quantize_free();
 }
@@ -20522,7 +11455,7 @@ int64_t llama_time_us(void) {
 
 struct llama_model * llama_load_model_from_file(
         const char * path_model,
-        struct llama_model_params   params) {
+        struct llama_model_params params) {
     ggml_time_init();
 
     llama_model * model = new llama_model;
@@ -20633,6 +11566,7 @@ struct llama_model * llama_load_model_from_file(
         } else if (status == -2) {
             LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
         }
+
         llama_free_model(model);
         return nullptr;
     }
@@ -20640,10 +11574,6 @@ struct llama_model * llama_load_model_from_file(
     return model;
 }
 
-void llama_free_model(struct llama_model * model) {
-    delete model;
-}
-
 struct llama_context * llama_new_context_with_model(
                  struct llama_model * model,
         struct llama_context_params   params) {
@@ -20844,7 +11774,7 @@ struct llama_context * llama_new_context_with_model(
 
         llama_set_abort_callback(ctx, params.abort_callback, params.abort_callback_data);
 
-        if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) {
+        if (!llama_kv_cache_init(ctx->kv_self, ctx->model, ctx->cparams, type_k, type_v, kv_size, cparams.offload_kqv)) {
             LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
             llama_free(ctx);
             return nullptr;
@@ -20995,442 +11925,26 @@ struct llama_context * llama_new_context_with_model(
     return ctx;
 }
 
-void llama_free(struct llama_context * ctx) {
-    delete ctx;
-}
+//
+// kv cache
+//
 
-uint32_t llama_n_ctx(const struct llama_context * ctx) {
-    return ctx->cparams.n_ctx;
-}
-
-uint32_t llama_n_batch(const struct llama_context * ctx) {
-    return ctx->cparams.n_batch;
-}
-
-uint32_t llama_n_ubatch(const struct llama_context * ctx) {
-    return ctx->cparams.n_ubatch;
-}
-
-uint32_t llama_n_seq_max(const struct llama_context * ctx) {
-    return ctx->kv_self.size;
-}
-
-enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
-    return model->vocab.type;
-}
-
-int32_t llama_n_vocab(const struct llama_model * model) {
-    return model->hparams.n_vocab;
-}
-
-int32_t llama_n_ctx_train(const struct llama_model * model) {
-    return model->hparams.n_ctx_train;
-}
-
-int32_t llama_n_embd(const struct llama_model * model) {
-    return model->hparams.n_embd;
-}
-
-int32_t llama_n_layer(const struct llama_model * model) {
-    return model->hparams.n_layer;
-}
-
-int32_t llama_n_head(const struct llama_model * model) {
-    return model->hparams.n_head();
-}
-
-const struct llama_model * llama_get_model(const struct llama_context * ctx) {
-    return &ctx->model;
-}
-
-enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) {
-    return ctx->cparams.pooling_type;
-}
-
-enum llama_rope_type llama_rope_type(const struct llama_model * model) {
-    switch (model->arch) {
-        // these models do not use RoPE
-        case LLM_ARCH_GPT2:
-        case LLM_ARCH_GPTJ:
-        case LLM_ARCH_MPT:
-        case LLM_ARCH_REFACT:
-        case LLM_ARCH_BLOOM:
-        case LLM_ARCH_MAMBA:
-        case LLM_ARCH_JINA_BERT_V2:
-        case LLM_ARCH_T5:
-        case LLM_ARCH_T5ENCODER:
-        case LLM_ARCH_JAIS:
-        case LLM_ARCH_RWKV6:
-        case LLM_ARCH_WAVTOKENIZER_DEC:
-            return LLAMA_ROPE_TYPE_NONE;
-
-        // use what we call a normal RoPE, operating on pairs of consecutive head values
-        case LLM_ARCH_LLAMA:
-        case LLM_ARCH_DECI:
-        case LLM_ARCH_BAICHUAN:
-        case LLM_ARCH_STARCODER:
-        case LLM_ARCH_PLAMO:
-        case LLM_ARCH_ORION:
-        case LLM_ARCH_INTERNLM2:
-        case LLM_ARCH_MINICPM:
-        case LLM_ARCH_XVERSE:
-        case LLM_ARCH_COMMAND_R:
-        case LLM_ARCH_OLMO:
-        case LLM_ARCH_ARCTIC:
-        case LLM_ARCH_DEEPSEEK:
-        case LLM_ARCH_DEEPSEEK2:
-        case LLM_ARCH_CHATGLM:
-        case LLM_ARCH_GRANITE:
-        case LLM_ARCH_GRANITE_MOE:
-        case LLM_ARCH_CHAMELEON:
-            return LLAMA_ROPE_TYPE_NORM;
-
-        // the pairs of head values are offset by n_rot/2
-        case LLM_ARCH_FALCON:
-        case LLM_ARCH_GROK:
-        case LLM_ARCH_DBRX:
-        case LLM_ARCH_BERT:
-        case LLM_ARCH_NOMIC_BERT:
-        case LLM_ARCH_STABLELM:
-        case LLM_ARCH_BITNET:
-        case LLM_ARCH_QWEN:
-        case LLM_ARCH_QWEN2:
-        case LLM_ARCH_QWEN2MOE:
-        case LLM_ARCH_OLMO2:
-        case LLM_ARCH_OLMOE:
-        case LLM_ARCH_PHI2:
-        case LLM_ARCH_PHI3:
-        case LLM_ARCH_GEMMA:
-        case LLM_ARCH_GEMMA2:
-        case LLM_ARCH_STARCODER2:
-        case LLM_ARCH_OPENELM:
-        case LLM_ARCH_GPTNEOX:
-        case LLM_ARCH_CODESHELL:
-        case LLM_ARCH_NEMOTRON:
-        case LLM_ARCH_EXAONE:
-        case LLM_ARCH_MINICPM3:
-            return LLAMA_ROPE_TYPE_NEOX;
-
-        case LLM_ARCH_QWEN2VL:
-            return LLAMA_ROPE_TYPE_MROPE;
-
-        // all model arches should be listed explicitly here
-        case LLM_ARCH_UNKNOWN:
-            GGML_ABORT("unknown architecture");
-    }
-
-    return LLAMA_ROPE_TYPE_NONE;
-}
-
-float llama_rope_freq_scale_train(const struct llama_model * model) {
-    return model->hparams.rope_freq_scale_train;
-}
-
-int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
-    const auto & it = model->gguf_kv.find(key);
-    if (it == model->gguf_kv.end()) {
-        if (buf_size > 0) {
-            buf[0] = '\0';
-        }
-        return -1;
-    }
-    return snprintf(buf, buf_size, "%s", it->second.c_str());
-}
-
-int32_t llama_model_meta_count(const struct llama_model * model) {
-    return (int)model->gguf_kv.size();
-}
-
-int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
-    if (i < 0 || i >= (int)model->gguf_kv.size()) {
-        if (buf_size > 0) {
-            buf[0] = '\0';
-        }
-        return -1;
-    }
-    auto it = model->gguf_kv.begin();
-    std::advance(it, i);
-    return snprintf(buf, buf_size, "%s", it->first.c_str());
-}
-
-int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
-    if (i < 0 || i >= (int)model->gguf_kv.size()) {
-        if (buf_size > 0) {
-            buf[0] = '\0';
-        }
-        return -1;
-    }
-    auto it = model->gguf_kv.begin();
-    std::advance(it, i);
-    return snprintf(buf, buf_size, "%s", it->second.c_str());
-}
-
-int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
-    return snprintf(buf, buf_size, "%s %s %s",
-            llama_model_arch_name(model->arch),
-            llama_model_type_name(model->type),
-            llama_model_ftype_name(model->ftype).c_str());
-}
-
-uint64_t llama_model_size(const struct llama_model * model) {
-    return model->n_bytes;
-}
-
-uint64_t llama_model_n_params(const struct llama_model * model) {
-    return model->n_elements;
-}
-
-bool llama_model_has_encoder(const struct llama_model * model) {
-    switch (model->arch) {
-        case LLM_ARCH_T5:        return true;
-        case LLM_ARCH_T5ENCODER: return true;
-        default:                 return false;
-    }
-}
-
-bool llama_model_has_decoder(const struct llama_model * model) {
-    switch (model->arch) {
-        case LLM_ARCH_T5ENCODER: return false;
-        default:                 return true;
-    }
-}
-
-llama_token llama_model_decoder_start_token(const struct llama_model * model) {
-    return model->hparams.dec_start_token_id;
-}
-
-bool llama_model_is_recurrent(const struct llama_model * model) {
-    switch (model->arch) {
-        case LLM_ARCH_MAMBA:  return true;
-        case LLM_ARCH_RWKV6:  return true;
-        default:              return false;
-    }
-}
-
-uint32_t llama_model_quantize(
-        const char * fname_inp,
-        const char * fname_out,
-        const llama_model_quantize_params * params) {
-    try {
-        llama_model_quantize_internal(fname_inp, fname_out, params);
-        return 0;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
-        return 1;
-    }
-}
-
-struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) {
-    try {
-        struct llama_lora_adapter * adapter = new llama_lora_adapter(model);
-        llama_lora_adapter_init_internal(model, path_lora, *adapter);
-        return adapter;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
-        return nullptr;
-    }
-}
-
-static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) {
-    GGML_ASSERT(cvec.tensors.empty());
-    GGML_ASSERT(cvec.ctxs.empty());
-    GGML_ASSERT(cvec.bufs.empty());
-
-    // create a context for each buffer type
-    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            struct ggml_init_params params = {
-                /*.mem_size   =*/ model.hparams.n_layer*ggml_tensor_overhead(),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * ctx = ggml_init(params);
-            if (!ctx) {
-                return nullptr;
-            }
-            ctx_map[buft] = ctx;
-            cvec.ctxs.emplace_back(ctx);
-            return ctx;
-        }
-        return it->second;
-    };
-
-    // make tensors
-    cvec.tensors.reserve(model.hparams.n_layer);
-    cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
-    for (size_t il = 1; il < model.hparams.n_layer; il++) {
-        ggml_backend_buffer_type_t buft = select_buft(*model.dev_layer.at(il).buft_list,
-            [&](ggml_context * ctx) {
-                ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
-                ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
-                return ggml_add(ctx, cur, layer_dir);
-            });
-        ggml_context * ctx = ctx_for_buft(buft);
-        if (!ctx) {
-            LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
-            return false;
-        }
-        ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
-        cvec.tensors.push_back(tensor);
-    }
-
-    // allocate tensors / buffers and zero
-    cvec.bufs.reserve(ctx_map.size());
-    for (auto it : ctx_map) {
-        ggml_backend_buffer_type_t buft = it.first;
-        ggml_context * ctx = it.second;
-        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
-        if (!buf) {
-            LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
-            return false;
-        }
-        ggml_backend_buffer_clear(buf, 0);
-        cvec.bufs.emplace_back(buf);
-    }
-
-    return true;
-}
-
-int32_t llama_control_vector_apply(struct llama_context * lctx, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) {
-    const llama_model & model = lctx->model;
-    llama_control_vector & cvec = lctx->cvec;
-
-    if (data == nullptr) {
-        // disable the current control vector (but leave allocated for later)
-        cvec.layer_start = -1;
-        cvec.layer_end   = -1;
-        return 0;
-    }
-
-    if (n_embd != (int) model.hparams.n_embd) {
-        LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
-        return 1;
-    }
-
-    if (cvec.tensors.empty()) {
-        if (!llama_control_vector_init(cvec, model)) {
-            return 1;
-        }
-    }
-
-    cvec.layer_start = il_start;
-    cvec.layer_end   = il_end;
-
-    for (size_t il = 1; il < model.hparams.n_layer; il++) {
-        assert(cvec.tensors[il] != nullptr);
-
-        const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
-        if (off + n_embd <= len) {
-            ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il]));
-        }
-    }
-
-    return 0;
-}
+// TODO: tmp bridges below until `struct llama_kv_cache` is exposed through the public API
 
 struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max) {
-    struct llama_kv_cache_view result = {
-        /*.n_cells            = */ 0,
-        /*.n_seq_max          = */ n_seq_max,
-        /*.token_count        = */ 0,
-        /*.used_cells         = */ llama_get_kv_cache_used_cells(ctx),
-        /*.max_contiguous     = */ 0,
-        /*.max_contiguous_idx = */ -1,
-        /*.cells              = */ nullptr,
-        /*.cells_sequences    = */ nullptr,
-    };
-    return result;
-}
-
-void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
-    if (view->cells != nullptr) {
-        free(view->cells);
-        view->cells = nullptr;
-    }
-    if (view->cells_sequences != nullptr) {
-        free(view->cells_sequences);
-        view->cells_sequences = nullptr;
-    }
+    return llama_kv_cache_view_init(ctx->kv_self, n_seq_max);
 }
 
 void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
-    if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
-        view->n_cells = int32_t(ctx->kv_self.size);
-        void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
-        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
-        view->cells = (struct llama_kv_cache_view_cell *)p;
-        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells);
-        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
-        view->cells_sequences = (llama_seq_id *)p;
-    }
-
-    const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
-    llama_kv_cache_view_cell * c_curr = view->cells;
-    llama_seq_id * cs_curr = view->cells_sequences;
-    int32_t used_cells = 0;
-    int32_t token_count = 0;
-    int32_t curr_contig_idx = -1;
-    uint32_t max_contig = 0;
-    int32_t max_contig_idx = -1;
-
-    for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_seq_max) {
-        const size_t curr_size = kv_cells[i].seq_id.size();
-        token_count += curr_size;
-        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
-
-        if (curr_size > 0) {
-            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
-                max_contig = i - curr_contig_idx;
-                max_contig_idx = curr_contig_idx;
-            }
-            curr_contig_idx = -1;
-        } else if (curr_contig_idx < 0) {
-            curr_contig_idx = i;
-        }
-
-        int seq_idx = 0;
-        for (const llama_seq_id it : kv_cells[i].seq_id) {
-            if (seq_idx >= view->n_seq_max) {
-                break;
-            }
-            cs_curr[seq_idx] = it;
-            seq_idx++;
-        }
-        if (seq_idx != 0) {
-            used_cells++;
-        }
-        for (; seq_idx < view->n_seq_max; seq_idx++) {
-            cs_curr[seq_idx] = -1;
-        }
-    }
-    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
-        max_contig_idx = curr_contig_idx;
-        max_contig = kv_cells.size() - curr_contig_idx;
-    }
-    view->max_contiguous = max_contig;
-    view->max_contiguous_idx = max_contig_idx;
-    view->token_count = token_count;
-    view->used_cells = used_cells;
-    if (uint32_t(used_cells) != ctx->kv_self.used) {
-        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
-            __func__, ctx->kv_self.used, used_cells);
-    }
+    llama_kv_cache_view_update(view, ctx->kv_self);
 }
 
 int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
-    int result = 0;
-
-    for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
-        result += ctx->kv_self.cells[i].seq_id.size();
-    }
-
-    return result;
+    return llama_get_kv_cache_token_count(ctx->kv_self);
 }
 
 int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
-    return ctx->kv_self.used;
+    return llama_get_kv_cache_used_cells(ctx->kv_self);
 }
 
 void llama_kv_cache_clear(struct llama_context * ctx) {
@@ -21481,1068 +11995,10 @@ void llama_kv_cache_update(struct llama_context * ctx) {
 }
 
 bool llama_kv_cache_can_shift(struct llama_context * ctx) {
-    return !ctx->kv_self.recurrent && ctx->model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA
+    return llama_kv_cache_can_shift(ctx->kv_self);
 }
 
-// deprecated
-size_t llama_get_state_size(struct llama_context * ctx) {
-    return llama_state_get_size(ctx);
-}
-
-// deprecated
-size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
-    return llama_state_get_data(ctx, dst, -1);
-}
-
-// deprecated
-size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
-    return llama_state_set_data(ctx, src, -1);
-}
-
-// deprecated
-bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
-}
-
-// deprecated
-bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
-    return llama_state_save_file(ctx, path_session, tokens, n_token_count);
-}
-
-// TODO: replace all non-fatal assertions with returned errors or exceptions
-struct llama_data_write {
-    virtual void write(const void * src, size_t size) = 0;
-    virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0;
-    virtual size_t get_size_written() = 0;
-    virtual ~llama_data_write() = default;
-
-    void write_string(const std::string & str) {
-        uint32_t str_size = str.size();
-
-        write(&str_size,  sizeof(str_size));
-        write(str.data(), str_size);
-    }
-
-    void write_model_info(const struct llama_context * ctx) {
-        std::string arch_str = LLM_ARCH_NAMES.at(ctx->model.arch);
-        write_string(arch_str);
-        // TODO: add more model-specific info which should prevent loading the session file if not identical
-    }
-
-    //void write_rng(const std::mt19937 & rng) {
-    //    std::ostringstream rng_ss;
-    //    rng_ss << rng;
-
-    //    const std::string & rng_str = rng_ss.str();
-
-    //    write_string(rng_str);
-    //}
-
-    void write_output_ids(struct llama_context * ctx) {
-        llama_output_reorder(ctx);
-
-        const uint32_t n_outputs = ctx->n_outputs;
-
-        std::vector<int32_t> output_pos;
-
-        const size_t    n_batch = ctx->cparams.n_batch;
-        const auto & output_ids = ctx->output_ids;
-
-        GGML_ASSERT(n_outputs <= ctx->output_size);
-
-        output_pos.resize(n_outputs);
-
-        // build a more compact representation of the output ids
-        for (size_t i = 0; i < n_batch; ++i) {
-            // map an output id to a position in the batch
-            int32_t pos = output_ids[i];
-            if (pos >= 0) {
-                GGML_ASSERT((uint32_t) pos < n_outputs);
-                output_pos[pos] = i;
-            }
-        }
-
-        write(&n_outputs, sizeof(n_outputs));
-
-        if (n_outputs) {
-            write(output_pos.data(), n_outputs * sizeof(int32_t));
-        }
-    }
-
-    void write_logits(const struct llama_context * ctx) {
-        const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab);
-
-        write(&logits_size, sizeof(logits_size));
-
-        if (logits_size) {
-            write(ctx->logits, logits_size * sizeof(float));
-        }
-    }
-
-    void write_embeddings(const struct llama_context * ctx) {
-        const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd);
-
-        write(&embeddings_size, sizeof(embeddings_size));
-
-        if (embeddings_size) {
-            write(ctx->embd, embeddings_size * sizeof(float));
-        }
-    }
-
-    void write_kv_cache_meta(const llama_kv_cache & kv_self, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) {
-
-        for (const auto & range : cell_ranges) {
-            for (uint32_t i = range.first; i < range.second; ++i) {
-                const auto & cell = kv_self.cells[i];
-                const llama_pos pos      = cell.pos;
-                const uint32_t  n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
-
-                write(&pos,      sizeof(pos));
-                write(&n_seq_id, sizeof(n_seq_id));
-
-                if (n_seq_id) {
-                    for (auto seq_id : cell.seq_id) {
-                        write(&seq_id, sizeof(seq_id));
-                    }
-                }
-            }
-        }
-    }
-
-    void write_kv_cache_data(const struct llama_context * ctx, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) {
-        const struct llama_kv_cache & kv_self = ctx->kv_self;
-        const struct llama_hparams & hparams = ctx->model.hparams;
-
-        const uint32_t v_trans = kv_self.v_trans ? 1 : 0;
-        const uint32_t n_layer = hparams.n_layer;
-
-        write(&v_trans, sizeof(v_trans));
-        write(&n_layer, sizeof(n_layer));
-
-        std::vector<uint8_t> tmp_buf;
-
-        // Iterate and write all the keys first, each row is a cell
-        // Get whole range at a time
-        for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-
-            // Write key type
-            const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
-            write(&k_type_i, sizeof(k_type_i));
-
-            // Write row size of key
-            const uint64_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
-            write(&k_size_row, sizeof(k_size_row));
-
-            // Read each range of cells of k_size length each into tmp_buf and write out
-            for (const auto & range : cell_ranges) {
-                const size_t range_size = range.second - range.first;
-                const size_t buf_size = range_size * k_size_row;
-                write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size);
-            }
-        }
-
-        if (!kv_self.v_trans) {
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Write value type
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                write(&v_type_i, sizeof(v_type_i));
-
-                // Write row size of value
-                const uint64_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
-                write(&v_size_row, sizeof(v_size_row));
-
-                // Read each range of cells of v_size length each into tmp_buf and write out
-                for (const auto & range : cell_ranges) {
-                    const size_t range_size = range.second - range.first;
-                    const size_t buf_size = range_size * v_size_row;
-                    write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size);
-                }
-            }
-        } else {
-            // When v is transposed, we also need the element size and get the element ranges from each row
-            const uint32_t kv_size = kv_self.size;
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Write value type
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                write(&v_type_i, sizeof(v_type_i));
-
-                // Write element size
-                const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
-                write(&v_size_el, sizeof(v_size_el));
-
-                // Write GQA embedding size
-                write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
-
-                // For each row, we get the element values of each cell
-                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
-                    // Read each range of cells of v_size_el length each into tmp_buf and write out
-                    for (const auto & range : cell_ranges) {
-                        const size_t range_size = range.second - range.first;
-                        const size_t src_offset = (range.first + j * kv_size) * v_size_el;
-                        const size_t buf_size = range_size * v_size_el;
-                        write_tensor_data(kv_self.v_l[il], src_offset, buf_size);
-                    }
-                }
-            }
-        }
-    }
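// Note on the transposed branch above: with v_trans set, dimension j of cell i in layer il
// lives at byte offset (i + j * kv_size) * v_size_el, so a contiguous cell range has to be
// written once per row j rather than as a single block per cell; that is what the
// src_offset = (range.first + j * kv_size) * v_size_el computation expresses.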
-
-    void write_kv_cache(const struct llama_context * ctx, llama_seq_id seq_id = -1) {
-        const struct llama_kv_cache & kv_self = ctx->kv_self;
-        std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
-        uint32_t cell_count = 0;
-
-        // Count the number of cells with the specified seq_id
-        // Find all the ranges of cells with this seq id (or all, when -1)
-        uint32_t cell_range_begin = kv_self.size;
-        for (uint32_t i = 0; i < kv_self.size; ++i) {
-            const auto & cell = kv_self.cells[i];
-            if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
-                ++cell_count;
-                if (cell_range_begin == kv_self.size) {
-                    cell_range_begin = i;
-                }
-            } else {
-                if (cell_range_begin != kv_self.size) {
-                    cell_ranges.emplace_back(cell_range_begin, i);
-                    cell_range_begin = kv_self.size;
-                }
-            }
-        }
-        if (cell_range_begin != kv_self.size) {
-            cell_ranges.emplace_back(cell_range_begin, kv_self.size);
-        }
-
-        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
-        uint32_t cell_count_check = 0;
-        for (const auto & range : cell_ranges) {
-            cell_count_check += range.second - range.first;
-        }
-        GGML_ASSERT(cell_count == cell_count_check);
-
-        write(&cell_count, sizeof(cell_count));
-
-        write_kv_cache_meta(kv_self, cell_ranges, seq_id);
-        write_kv_cache_data(ctx, cell_ranges);
-    }
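// Worked example of the scan in write_kv_cache above: with kv_self.size == 10 and the target
// seq_id present only in cells 2, 3, 4 and 9, the loop yields cell_ranges == {(2,5), (9,10)}
// and cell_count == 4, so the debug check (sum of range lengths == cell_count) holds.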
-};
-
-struct llama_data_read {
-    virtual const uint8_t * read(size_t size) = 0;
-    virtual void read_to(void * dst, size_t size) = 0;
-    virtual size_t get_size_read() = 0;
-    virtual ~llama_data_read() = default;
-
-    void read_string(std::string & str) {
-        uint32_t str_size;
-        read_to(&str_size, sizeof(str_size));
-
-        str.assign((const char *) read(str_size), str_size);
-    }
-
-    // validate model information
-    void read_model_info(const struct llama_context * ctx) {
-        std::string cur_arch_str = LLM_ARCH_NAMES.at(ctx->model.arch);
-        std::string arch_str;
-        read_string(arch_str);
-        if (cur_arch_str != arch_str) {
-            throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str()));
-        }
-        // TODO: add more info which needs to be identical but which is not verified otherwise
-    }
-
-    //void read_rng(std::mt19937 & rng) {
-    //    std::string rng_str;
-    //    read_string(rng_str);
-
-    //    std::istringstream rng_ss(rng_str);
-    //    rng_ss >> rng;
-
-    //    if (rng_ss.fail()) {
-    //        throw std::runtime_error("failed to load RNG state");
-    //    }
-    //}
-
-    void read_output_ids(struct llama_context * ctx) {
-        std::vector<int32_t> output_pos;
-
-        uint32_t n_outputs;
-        read_to(&n_outputs, sizeof(n_outputs));
-
-        if (n_outputs > llama_output_reserve(*ctx, n_outputs)) {
-            throw std::runtime_error("could not reserve outputs");
-        }
-
-        if (n_outputs) {
-            output_pos.resize(n_outputs);
-            read_to(output_pos.data(), n_outputs * sizeof(int32_t));
-
-            for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
-                int32_t id = output_pos[i];
-                if ((uint32_t) id >= ctx->cparams.n_batch) {
-                    throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, ctx->cparams.n_batch));
-                }
-                ctx->output_ids[id] = i;
-            }
-
-            ctx->n_outputs = n_outputs;
-        }
-    }
-
-    void read_logits(struct llama_context * ctx) {
-        uint64_t logits_size;
-        read_to(&logits_size, sizeof(logits_size));
-
-        if (ctx->logits_size < logits_size) {
-            throw std::runtime_error("logits buffer too small");
-        }
-
-        if (logits_size) {
-            read_to(ctx->logits, logits_size * sizeof(float));
-        }
-    }
-
-    void read_embeddings(struct llama_context * ctx) {
-        uint64_t embeddings_size;
-        read_to(&embeddings_size, sizeof(embeddings_size));
-
-        if (ctx->embd_size < embeddings_size) {
-            throw std::runtime_error("embeddings buffer too small");
-        }
-
-        if (embeddings_size) {
-            read_to(ctx->embd, embeddings_size * sizeof(float));
-        }
-    }
-
-    bool read_kv_cache_meta(struct llama_context * ctx, uint32_t cell_count, llama_seq_id dest_seq_id = -1) {
-        struct llama_kv_cache & kv_self = ctx->kv_self;
-
-        if (dest_seq_id != -1) {
-            // single sequence
-
-            llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
-
-            llama_ubatch batch = ctx->sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
-            batch.n_tokens = cell_count;
-            batch.n_seq_tokens = cell_count;
-            batch.n_seqs = 1;
-
-            for (uint32_t i = 0; i < cell_count; ++i) {
-                llama_pos pos;
-                uint32_t n_seq_id;
-
-                read_to(&pos, sizeof(pos));
-                read_to(&n_seq_id, sizeof(n_seq_id));
-
-                if (n_seq_id != 0) {
-                    LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
-                    return false;
-                }
-
-                batch.pos[i] = pos;
-            }
-            batch.n_seq_id[0] = 1;
-            batch.seq_id[0] = &dest_seq_id;
-            if (!llama_kv_cache_find_slot(kv_self, batch)) {
-                LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
-                return false;
-            }
-
-            // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
-            // Assume that this is one contiguous block of cells
-            GGML_ASSERT(kv_self.head + cell_count <= kv_self.size);
-            GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]);
-            GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]);
-            GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id));
-            GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id));
-        } else {
-            // whole KV cache restore
-
-            if (cell_count > kv_self.size) {
-                LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
-                return false;
-            }
-
-            llama_kv_cache_clear(kv_self);
-
-            for (uint32_t i = 0; i < cell_count; ++i) {
-                llama_kv_cell & cell = kv_self.cells[i];
-
-                llama_pos pos;
-                uint32_t  n_seq_id;
-
-                read_to(&pos,      sizeof(pos));
-                read_to(&n_seq_id, sizeof(n_seq_id));
-
-                cell.pos = pos;
-
-                for (uint32_t j = 0; j < n_seq_id; ++j) {
-                    llama_seq_id seq_id;
-                    read_to(&seq_id, sizeof(seq_id));
-
-                    if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
-                        LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
-                        return false;
-                    }
-
-                    cell.seq_id.insert(seq_id);
-
-                    if (kv_self.recurrent) {
-                        int32_t & tail = kv_self.cells[seq_id].tail;
-                        if (tail != -1) {
-                            LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
-                            return false;
-                        }
-                        tail = i;
-                    }
-                }
-            }
-
-            kv_self.head = 0;
-            kv_self.used = cell_count;
-        }
-
-        if (kv_self.recurrent) {
-            for (uint32_t i = 0; i < cell_count; ++i) {
-                uint32_t cell_id = kv_self.head + i;
-                // make sure the recurrent states will keep their restored state
-                kv_self.cells[cell_id].src = cell_id;
-            }
-        }
-
-        return true;
-    }
-
-    bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) {
-        const struct llama_hparams & hparams = ctx->model.hparams;
-        struct llama_kv_cache & kv_self = ctx->kv_self;
-        uint32_t v_trans;
-        uint32_t n_layer;
-        read_to(&v_trans, sizeof(v_trans));
-        read_to(&n_layer, sizeof(n_layer));
-
-        if (n_layer != hparams.n_layer) {
-            LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
-            return false;
-        }
-        if (cell_count > kv_self.size) {
-            LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, kv_self.size);
-            return false;
-        }
-        if (kv_self.v_trans != (bool) v_trans) {
-            LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
-            return false;
-        }
-
-        // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
-        for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-
-            // Read type of key
-            int32_t k_type_i_ref;
-            read_to(&k_type_i_ref, sizeof(k_type_i_ref));
-            const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
-            if (k_type_i != k_type_i_ref) {
-                LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
-                return false;
-            }
-
-            // Read row size of key
-            uint64_t k_size_row_ref;
-            read_to(&k_size_row_ref, sizeof(k_size_row_ref));
-            const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
-            if (k_size_row != k_size_row_ref) {
-                LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
-                return false;
-            }
-
-            if (cell_count) {
-                // Read and set the keys for the whole cell range
-                ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row);
-            }
-        }
-
-        if (!kv_self.v_trans) {
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Read type of value
-                int32_t v_type_i_ref;
-                read_to(&v_type_i_ref, sizeof(v_type_i_ref));
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                if (v_type_i != v_type_i_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
-                    return false;
-                }
-
-                // Read row size of value
-                uint64_t v_size_row_ref;
-                read_to(&v_size_row_ref, sizeof(v_size_row_ref));
-                const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
-                if (v_size_row != v_size_row_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
-                    return false;
-                }
-
-                if (cell_count) {
-                    // Read and set the values for the whole cell range
-                    ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row);
-                }
-            }
-        } else {
-            // For each layer, read the values for each cell (transposed)
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Read type of value
-                int32_t v_type_i_ref;
-                read_to(&v_type_i_ref, sizeof(v_type_i_ref));
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                if (v_type_i != v_type_i_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
-                    return false;
-                }
-
-                // Read element size of value
-                uint32_t v_size_el_ref;
-                read_to(&v_size_el_ref, sizeof(v_size_el_ref));
-                const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
-                if (v_size_el != v_size_el_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
-                    return false;
-                }
-
-                // Read GQA embedding size
-                uint32_t n_embd_v_gqa_ref;
-                read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
-                if (n_embd_v_gqa != n_embd_v_gqa_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
-                    return false;
-                }
-
-                if (cell_count) {
-                    // For each row in the transposed matrix, read the values for the whole cell range
-                    for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
-                        const size_t dst_offset = (kv_self.head + j * kv_self.size) * v_size_el;
-                        ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
-                    }
-                }
-            }
-        }
-        return true;
-    }
-
-    void read_kv_cache(struct llama_context * ctx, llama_seq_id seq_id = -1) {
-        uint32_t cell_count;
-        read_to(&cell_count, sizeof(cell_count));
-
-        bool res = read_kv_cache_meta(ctx, cell_count, seq_id) && read_kv_cache_data(ctx, cell_count);
-
-        if (!res) {
-            if (seq_id == -1) {
-                llama_kv_cache_clear(ctx);
-            } else {
-                llama_kv_cache_seq_rm(ctx, seq_id, -1, -1);
-            }
-            throw std::runtime_error("failed to restore kv cache");
-        }
-    }
-};
-
-struct llama_data_write_dummy : llama_data_write {
-    size_t size_written = 0;
-
-    llama_data_write_dummy() {}
-
-    void write(const void * /* src */, size_t size) override {
-        size_written += size;
-    }
-
-    void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
-        size_written += size;
-    }
-
-    size_t get_size_written() override {
-        return size_written;
-    }
-};
-
-struct llama_data_write_buffer : llama_data_write {
-    uint8_t * ptr;
-    size_t buf_size = 0;
-    size_t size_written = 0;
-
-    llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
-
-    void write(const void * src, size_t size) override {
-        if (size > buf_size) {
-            throw std::runtime_error("unexpectedly reached end of buffer");
-        }
-        memcpy(ptr, src, size);
-        ptr += size;
-        size_written += size;
-        buf_size -= size;
-    }
-
-    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
-        if (size > buf_size) {
-            throw std::runtime_error("unexpectedly reached end of buffer");
-        }
-        ggml_backend_tensor_get(tensor, ptr, offset, size);
-        ptr += size;
-        size_written += size;
-        buf_size -= size;
-    }
-
-    size_t get_size_written() override {
-        return size_written;
-    }
-};
-
-struct llama_data_read_buffer : llama_data_read {
-    const uint8_t * ptr;
-    size_t buf_size = 0;
-    size_t size_read = 0;
-
-    llama_data_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
-
-    const uint8_t * read(size_t size) override {
-        const uint8_t * base_ptr = ptr;
-        if (size > buf_size) {
-            throw std::runtime_error("unexpectedly reached end of buffer");
-        }
-        ptr += size;
-        size_read += size;
-        buf_size -= size;
-        return base_ptr;
-    }
-
-    void read_to(void * dst, size_t size) override {
-        memcpy(dst, read(size), size);
-    }
-
-    size_t get_size_read() override {
-        return size_read;
-    }
-};
-
-struct llama_data_write_file : llama_data_write {
-    llama_file * file;
-    size_t size_written = 0;
-    std::vector<uint8_t> temp_buffer;
-
-    llama_data_write_file(llama_file * f) : file(f) {}
-
-    void write(const void * src, size_t size) override {
-        file->write_raw(src, size);
-        size_written += size;
-    }
-
-    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
-        temp_buffer.resize(size);
-        ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
-        write(temp_buffer.data(), temp_buffer.size());
-    }
-
-    size_t get_size_written() override {
-        return size_written;
-    }
-};
-
-struct llama_data_read_file : llama_data_read {
-    llama_file * file;
-    size_t size_read = 0;
-    std::vector<uint8_t> temp_buffer;
-
-    llama_data_read_file(llama_file * f) : file(f) {}
-
-    void read_to(void * dst, size_t size) override {
-        file->read_raw(dst, size);
-        size_read += size;
-    }
-
-    const uint8_t * read(size_t size) override {
-        temp_buffer.resize(size);
-        read_to(temp_buffer.data(), size);
-        return temp_buffer.data();
-    }
-
-    size_t get_size_read() override {
-        return size_read;
-    }
-};
-
-/** copy state data into either a buffer or file depending on the passed in context
- *
- * file context:
- * llama_file file("/path", "wb");
- * llama_data_write_file data_ctx(&file);
- * llama_state_get_data_internal(ctx, data_ctx);
- *
- * buffer context:
- * std::vector<uint8_t> buf(max_size, 0);
- * llama_data_write_buffer data_ctx(buf.data(), max_size);
- * llama_state_get_data_internal(ctx, data_ctx);
- *
-*/
-static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx) {
-    llama_synchronize(ctx);
-
-    data_ctx.write_model_info(ctx);
-
-    // copy outputs
-    data_ctx.write_output_ids(ctx);
-    data_ctx.write_logits(ctx);
-    data_ctx.write_embeddings(ctx);
-
-    data_ctx.write_kv_cache(ctx);
-
-    return data_ctx.get_size_written();
-}
-
-size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) {
-    llama_data_write_buffer data_ctx(dst, size);
-    try {
-        return llama_state_get_data_internal(ctx, data_ctx);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-// Returns the *actual* size of the state.
-// Intended to be used when saving the state to a buffer.
-size_t llama_state_get_size(struct llama_context * ctx) {
-    llama_data_write_dummy data_ctx;
-    try {
-        return llama_state_get_data_internal(ctx, data_ctx);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
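// A minimal usage sketch of the two calls above (illustration only; `save_state` and its
// error handling are assumptions, not part of this patch): query the exact size first, then
// copy the serialized state into a caller-owned buffer.
static std::vector<uint8_t> save_state(struct llama_context * ctx) {
    std::vector<uint8_t> buf(llama_state_get_size(ctx));
    const size_t written = llama_state_get_data(ctx, buf.data(), buf.size());
    buf.resize(written); // a return value of 0 means an error was logged
    return buf;
}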
-static size_t llama_state_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx) {
-    llama_synchronize(ctx);
-
-    data_ctx.read_model_info(ctx);
-
-    // set outputs
-    data_ctx.read_output_ids(ctx);
-    data_ctx.read_logits(ctx);
-    data_ctx.read_embeddings(ctx);
-
-    data_ctx.read_kv_cache(ctx);
-
-    return data_ctx.get_size_read();
-}
-
-// Sets the state reading from the specified source address
-size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src, size_t size) {
-    llama_data_read_buffer data_ctx(src, size);
-    try {
-        return llama_state_set_data_internal(ctx, data_ctx);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    llama_file file(path_session, "rb");
-
-    // sanity checks
-    {
-        const uint32_t magic   = file.read_u32();
-        const uint32_t version = file.read_u32();
-
-        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
-            LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
-            return false;
-        }
-    }
-
-    // load the prompt
-    {
-        const uint32_t n_token_count = file.read_u32();
-
-        if (n_token_count > n_token_capacity) {
-            LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
-            return false;
-        }
-
-        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
-        *n_token_count_out = n_token_count;
-    }
-
-    // restore the context state
-    {
-        const size_t n_state_size_cur = file.size - file.tell();
-
-        llama_data_read_file data_ctx(&file);
-        const size_t n_read = llama_state_set_data_internal(ctx, data_ctx);
-
-        if (n_read != n_state_size_cur) {
-            LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read);
-            return false;
-        }
-    }
-    return true;
-}
-
-bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    try {
-        return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what());
-        return false;
-    }
-}
-
-static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
-    llama_file file(path_session, "wb");
-
-    file.write_u32(LLAMA_SESSION_MAGIC);
-    file.write_u32(LLAMA_SESSION_VERSION);
-
-    // save the prompt
-    file.write_u32((uint32_t) n_token_count);
-    file.write_raw(tokens, sizeof(llama_token) * n_token_count);
-
-    // save the context state using stream saving
-    llama_data_write_file data_ctx(&file);
-    llama_state_get_data_internal(ctx, data_ctx);
-
-    return true;
-}
-
-bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
-    try {
-        return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what());
-        return false;
-    }
-}
-
-static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx, llama_seq_id seq_id) {
-    llama_synchronize(ctx);
-
-    data_ctx.write_kv_cache(ctx, seq_id);
-
-    return data_ctx.get_size_written();
-}
-
-size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) {
-    llama_data_write_dummy data_ctx;
-    return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
-}
-
-size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
-    llama_data_write_buffer data_ctx(dst, size);
-    try {
-        return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving sequence state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static size_t llama_state_seq_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx, llama_seq_id dest_seq_id) {
-    llama_synchronize(ctx);
-
-    data_ctx.read_kv_cache(ctx, dest_seq_id);
-
-    return data_ctx.get_size_read();
-}
-
-size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id dest_seq_id) {
-    llama_data_read_buffer data_ctx(src, size);
-    try {
-        return llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading sequence state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
-    llama_file file(filepath, "wb");
-
-    file.write_u32(LLAMA_STATE_SEQ_MAGIC);
-    file.write_u32(LLAMA_STATE_SEQ_VERSION);
-
-    // save the prompt
-    file.write_u32((uint32_t) n_token_count);
-    file.write_raw(tokens, sizeof(llama_token) * n_token_count);
-
-    // save the context state using stream saving
-    llama_data_write_file data_ctx(&file);
-    llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
-
-    const size_t res = file.tell();
-    GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written());
-    return res;
-}
-
-static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    llama_file file(filepath, "rb");
-
-    // version checks
-    {
-        const uint32_t magic   = file.read_u32();
-        const uint32_t version = file.read_u32();
-
-        if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
-            LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
-            return 0;
-        }
-    }
-
-    // load the prompt
-    {
-        const uint32_t n_token_count = file.read_u32();
-
-        if (n_token_count > n_token_capacity) {
-            LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
-            return 0;
-        }
-
-        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
-        *n_token_count_out = n_token_count;
-    }
-
-    // restore the context state
-    {
-        const size_t state_size = file.size - file.tell();
-        llama_data_read_file data_ctx(&file);
-        const size_t nread = llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id);
-        if (!nread) {
-            LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
-            return 0;
-        }
-        GGML_ASSERT(nread <= state_size);
-        GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
-    }
-
-    return file.tell();
-}
-
-size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
-    try {
-        return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    try {
-        return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) {
-    ctx->cparams.n_threads       = n_threads;
-    ctx->cparams.n_threads_batch = n_threads_batch;
-}
-
-int32_t llama_n_threads(struct llama_context * ctx) {
-    return ctx->cparams.n_threads;
-}
-
-int32_t llama_n_threads_batch(struct llama_context * ctx) {
-    return ctx->cparams.n_threads_batch;
-}
-
-void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
-    ctx->abort_callback      = abort_callback;
-    ctx->abort_callback_data = abort_callback_data;
-
-    for (auto & backend : ctx->backends) {
-        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get()));
-        auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback");
-        if (set_abort_callback_fn) {
-            set_abort_callback_fn(backend.get(), ctx->abort_callback, ctx->abort_callback_data);
-        }
-    }
-}
-
-void llama_set_embeddings(struct llama_context * ctx, bool embeddings) {
-    ctx->cparams.embeddings = embeddings;
-}
-
-void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) {
-    ctx->cparams.causal_attn = causal_attn;
-}
-
-struct llama_batch llama_batch_get_one(
-             llama_token * tokens,
-                 int32_t   n_tokens) {
-    return {
-        /*n_tokens       =*/ n_tokens,
-        /*tokens         =*/ tokens,
-        /*embd           =*/ nullptr,
-        /*pos            =*/ nullptr,
-        /*n_seq_id       =*/ nullptr,
-        /*seq_id         =*/ nullptr,
-        /*logits         =*/ nullptr,
-    };
-}
-
-struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
-    llama_batch batch = {
-        /*n_tokens       =*/ 0,
-        /*tokens         =*/ nullptr,
-        /*embd           =*/ nullptr,
-        /*pos            =*/ nullptr,
-        /*n_seq_id       =*/ nullptr,
-        /*seq_id         =*/ nullptr,
-        /*logits         =*/ nullptr,
-    };
-
-    if (embd) {
-        batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
-    } else {
-        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
-    }
-
-    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens_alloc);
-    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens_alloc);
-    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
-    for (int i = 0; i < n_tokens_alloc; ++i) {
-        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
-    }
-    batch.seq_id[n_tokens_alloc] = nullptr;
-
-    batch.logits   = (int8_t *)        malloc(sizeof(int8_t)         * n_tokens_alloc);
-
-    return batch;
-}
-
-void llama_batch_free(struct llama_batch batch) {
-    if (batch.token)    free(batch.token);
-    if (batch.embd)     free(batch.embd);
-    if (batch.pos)      free(batch.pos);
-    if (batch.n_seq_id) free(batch.n_seq_id);
-    if (batch.seq_id) {
-        for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
-            free(batch.seq_id[i]);
-        }
-        free(batch.seq_id);
-    }
-    if (batch.logits)   free(batch.logits);
-}
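// A short usage sketch of the pair above (illustration only; the sizes and the wrapper name
// are arbitrary assumptions): llama_batch_init and llama_batch_free are meant to be paired,
// and the nullptr sentinel stored at seq_id[n_tokens_alloc] is what lets llama_batch_free
// know where the per-token seq_id arrays end.
static void batch_roundtrip_example(void) {
    llama_batch batch = llama_batch_init(/*n_tokens_alloc =*/ 512, /*embd =*/ 0, /*n_seq_max =*/ 1);
    // ... fill batch.token, batch.pos, batch.n_seq_id, batch.seq_id[i][0] and batch.logits ...
    llama_batch_free(batch);
}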
+///
 
 int32_t llama_encode(
         struct llama_context * ctx,
@@ -22566,150 +12022,12 @@ int32_t llama_decode(
     return ret;
 }
 
-void llama_synchronize(struct llama_context * ctx) {
-    ggml_backend_sched_synchronize(ctx->sched.get());
-
-    // FIXME: if multiple single tokens are evaluated without a synchronization,
-    // the stats will be added to the prompt evaluation stats
-    // this should only happen when using batch size 1 to evaluate a batch
-
-    // add the evaluation to the stats
-    if (ctx->n_queued_tokens == 1) {
-        if (!ctx->cparams.no_perf) {
-            ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us;
-        }
-        ctx->n_eval++;
-    } else if (ctx->n_queued_tokens > 1) {
-        if (!ctx->cparams.no_perf) {
-            ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us;
-        }
-        ctx->n_p_eval += ctx->n_queued_tokens;
-    }
-
-    // get a more accurate load time, upon first eval
-    if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) {
-        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
-        ctx->has_evaluated_once = true;
-    }
-
-    ctx->n_queued_tokens = 0;
-    ctx->t_compute_start_us = 0;
-}
-
-float * llama_get_logits(struct llama_context * ctx) {
-    llama_synchronize(ctx);
-
-    // reorder logits for backward compatibility
-    // TODO: maybe deprecate this
-    llama_output_reorder(ctx);
-
-    return ctx->logits;
-}
-
-float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
-    int32_t j = -1;
-    llama_synchronize(ctx);
-
-    try {
-        if (ctx->logits == nullptr) {
-            throw std::runtime_error("no logits");
-        }
-
-        if (i < 0) {
-            j = ctx->n_outputs + i;
-            if (j < 0) {
-                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
-            }
-        } else if ((size_t) i >= ctx->output_ids.size()) {
-            throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size()));
-        } else {
-            j = ctx->output_ids[i];
-        }
-
-        if (j < 0) {
-            throw std::runtime_error(format("batch.logits[%d] != true", i));
-        }
-        if (j >= ctx->n_outputs) {
-            // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
-        }
-
-        return ctx->logits + j*ctx->model.hparams.n_vocab;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
-#ifndef NDEBUG
-        GGML_ABORT("fatal error");
-#else
-        return nullptr;
-#endif
-    }
-}
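// Usage note for the index handling above: negative indices count from the end of the outputs,
// so (assuming the last token of the batch had logits requested) llama_get_logits_ith(ctx, -1)
// returns a pointer to the n_vocab logits of the final output.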
-
-float * llama_get_embeddings(struct llama_context * ctx) {
-    llama_synchronize(ctx);
-
-    // reorder embeddings for backward compatibility
-    // TODO: maybe deprecate this
-    llama_output_reorder(ctx);
-
-    return ctx->embd;
-}
-
-float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
-    int32_t j = -1;
-
-    llama_synchronize(ctx);
-
-    try {
-        if (ctx->embd == nullptr) {
-            throw std::runtime_error("no embeddings");
-        }
-
-        if (i < 0) {
-            j = ctx->n_outputs + i;
-            if (j < 0) {
-                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
-            }
-        } else if ((size_t) i >= ctx->output_ids.size()) {
-            throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size()));
-        } else {
-            j = ctx->output_ids[i];
-        }
-
-        if (j < 0) {
-            throw std::runtime_error(format("batch.logits[%d] != true", i));
-        }
-        if (j >= ctx->n_outputs) {
-            // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
-        }
-
-        return ctx->embd + j*ctx->model.hparams.n_embd;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
-#ifndef NDEBUG
-        GGML_ABORT("fatal error");
-#else
-        return nullptr;
-#endif
-    }
-}
-
-float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) {
-    llama_synchronize(ctx);
-
-    auto it = ctx->embd_seq.find(seq_id);
-    if (it == ctx->embd_seq.end()) {
-        return nullptr;
-    }
-
-    return it->second.data();
-}
-
 //
 // vocab
 //
 
+// TODO: tmp bridges below until `struct llama_vocab` is exposed through the public API
+
 const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
     return llama_token_get_text_impl(model->vocab, token);
 }
@@ -22842,478 +12160,6 @@ int32_t llama_detokenize(
 // chat templates
 //
 
-static llm_chat_template llama_chat_detect_template(const std::string & tmpl) {
-    if (LLM_CHAT_TEMPLATES.find(tmpl) != LLM_CHAT_TEMPLATES.end()) {
-        return LLM_CHAT_TEMPLATES.at(tmpl);
-    }
-    auto tmpl_contains = [&tmpl](const char * haystack) -> bool {
-        return tmpl.find(haystack) != std::string::npos;
-    };
-    if (tmpl_contains("<|im_start|>")) {
-        return LLM_CHAT_TEMPLATE_CHATML;
-    } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) {
-        if (tmpl_contains("[SYSTEM_PROMPT]")) {
-            return LLM_CHAT_TEMPLATE_MISTRAL_V7;
-        } else if (
-            // catches official 'v1' template
-            tmpl_contains("' [INST] ' + system_message")
-            // catches official 'v3' and 'v3-tekken' templates
-            || tmpl_contains("[AVAILABLE_TOOLS]")
-        ) {
-            // Official mistral 'v1', 'v3' and 'v3-tekken' templates
-            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
-            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
-            if (tmpl_contains(" [INST]")) {
-                return LLM_CHAT_TEMPLATE_MISTRAL_V1;
-            } else if (tmpl_contains("\"[INST]\"")) {
-                return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN;
-            }
-            return LLM_CHAT_TEMPLATE_MISTRAL_V3;
-        } else {
-            // llama2 template and its variants
-            // [variant] support system message
-            // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
-            bool support_system_message = tmpl_contains("<<SYS>>");
-            bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]");
-            bool strip_message = tmpl_contains("content.strip()");
-            if (strip_message) {
-                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
-            } else if (add_bos_inside_history) {
-                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
-            } else if (support_system_message) {
-                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS;
-            } else {
-                return LLM_CHAT_TEMPLATE_LLAMA_2;
-            }
-        }
-    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
-        return LLM_CHAT_TEMPLATE_PHI_3;
-    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
-        return LLM_CHAT_TEMPLATE_FALCON_3;
-    } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
-        return LLM_CHAT_TEMPLATE_ZEPHYR;
-    } else if (tmpl_contains("bos_token + message['role']")) {
-        return LLM_CHAT_TEMPLATE_MONARCH;
-    } else if (tmpl_contains("")) {
-        return LLM_CHAT_TEMPLATE_GEMMA;
-    } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) {
-        // OrionStarAI/Orion-14B-Chat
-        return LLM_CHAT_TEMPLATE_ORION;
-    } else if (tmpl_contains("GPT4 Correct ")) {
-        // openchat/openchat-3.5-0106
-        return LLM_CHAT_TEMPLATE_OPENCHAT;
-    } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) {
-        // eachadea/vicuna-13b-1.1 (and Orca variant)
-        if (tmpl_contains("SYSTEM: ")) {
-            return LLM_CHAT_TEMPLATE_VICUNA_ORCA;
-        }
-        return LLM_CHAT_TEMPLATE_VICUNA;
-    } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) {
-        // deepseek-ai/deepseek-coder-33b-instruct
-        return LLM_CHAT_TEMPLATE_DEEPSEEK;
-    } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) {
-        // CohereForAI/c4ai-command-r-plus
-        return LLM_CHAT_TEMPLATE_COMMAND_R;
-    } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) {
-        return LLM_CHAT_TEMPLATE_LLAMA_3;
-    } else if (tmpl_contains("[gMASK]sop")) {
-        // chatglm3-6b
-        return LLM_CHAT_TEMPLATE_CHATGML_3;
-    } else if (tmpl_contains("[gMASK]")) {
-        return LLM_CHAT_TEMPLATE_CHATGML_4;
-    } else if (tmpl_contains(LU8("<用户>"))) {
-        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
-        return LLM_CHAT_TEMPLATE_MINICPM;
-    } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
-        return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
-    } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
-        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
-        // EXAONE-3.0-7.8B-Instruct
-        return LLM_CHAT_TEMPLATE_EXAONE_3;
-    } else if (tmpl_contains("rwkv-world")) {
-        return LLM_CHAT_TEMPLATE_RWKV_WORLD;
-    } else if (tmpl_contains("<|start_of_role|>")) {
-        return LLM_CHAT_TEMPLATE_GRANITE;
-    } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) {
-        return LLM_CHAT_TEMPLATE_GIGACHAT;
-    } else if (tmpl_contains("<|role_start|>")) {
-        return LLM_CHAT_TEMPLATE_MEGREZ;
-    }
-    return LLM_CHAT_TEMPLATE_UNKNOWN;
-}
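// Illustration of the heuristics above (the template string is a made-up example): a template
// such as
//     "{% for m in messages %}<|im_start|>{{ m.role }}\n{{ m.content }}<|im_end|>\n{% endfor %}"
// is not an exact key in LLM_CHAT_TEMPLATES, contains "<|im_start|>", and is therefore detected
// as LLM_CHAT_TEMPLATE_CHATML by the first contains-check.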
-
-// Simple version of "llama_apply_chat_template" that only works with strings
-// This function uses heuristic checks to determine commonly used template. It is not a jinja parser.
-static int32_t llama_chat_apply_template_internal(
-    const llm_chat_template tmpl,
-    const std::vector<const llama_chat_message *> & chat,
-    std::string & dest, bool add_ass) {
-    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
-    std::stringstream ss;
-    if (tmpl == LLM_CHAT_TEMPLATE_CHATML) {
-        // chatml template
-        for (auto message : chat) {
-            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
-        }
-        if (add_ass) {
-            ss << "<|im_start|>assistant\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) {
-        // Official mistral 'v7' template
-        // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7
-        for (auto message : chat) {
-            std::string role(message->role);
-            std::string content(message->content);
-            if (role == "system") {
-                ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]";
-            } else if (role == "user") {
-                ss << "[INST] " << content << "[/INST]";
-            }
-            else {
-                ss << " " << content << "";
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1
-            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3
-            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) {
-        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
-        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
-        std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : "";
-        std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? "" : " ";
-        bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3;
-        bool is_inside_turn = false;
-        for (auto message : chat) {
-            if (!is_inside_turn) {
-                ss << leading_space << "[INST]" << trailing_space;
-                is_inside_turn = true;
-            }
-            std::string role(message->role);
-            std::string content(message->content);
-            if (role == "system") {
-                ss << content << "\n\n";
-            } else if (role == "user") {
-                ss << content << leading_space << "[/INST]";
-            } else {
-                ss << trailing_space << (trim_assistant_message ? trim(content) : content) << "</s>";
-                is_inside_turn = false;
-            }
-        }
-    } else if (
-            tmpl == LLM_CHAT_TEMPLATE_LLAMA_2
-            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS
-            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS
-            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) {
-        // llama2 template and its variants
-        // [variant] support system message
-        // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
-        bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2;
-        // [variant] add BOS inside history
-        bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
-        // [variant] trim spaces from the input message
-        bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
-        // construct the prompt
-        bool is_inside_turn = true; // skip BOS at the beginning
-        ss << "[INST] ";
-        for (auto message : chat) {
-            std::string content = strip_message ? trim(message->content) : message->content;
-            std::string role(message->role);
-            if (!is_inside_turn) {
-                is_inside_turn = true;
-                ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
-            }
-            if (role == "system") {
-                if (support_system_message) {
-                    ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
-                } else {
-                    // if the model does not support system message, we still include it in the first message, but without <<SYS>>
-                    ss << content << "\n";
-                }
-            } else if (role == "user") {
-                ss << content << " [/INST]";
-            } else {
-                ss << content << "</s>";
-                is_inside_turn = false;
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) {
-        // Phi 3
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
-        }
-        if (add_ass) {
-            ss << "<|assistant|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) {
-        // Falcon 3
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>\n" << message->content << "\n";
-        }
-        if (add_ass) {
-            ss << "<|assistant|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) {
-        // zephyr template
-        for (auto message : chat) {
-            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
-        }
-        if (add_ass) {
-            ss << "<|assistant|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) {
-        // mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
-        for (auto message : chat) {
-            std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
-            ss << bos << message->role << "\n" << message->content << "</s>\n";
-        }
-        if (add_ass) {
-            ss << "<s>assistant\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) {
-        // google/gemma-7b-it
-        std::string system_prompt = "";
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
-                system_prompt = trim(message->content);
-                continue;
-            }
-            // in gemma, "assistant" is "model"
-            role = role == "assistant" ? "model" : message->role;
-            ss << "<start_of_turn>" << role << "\n";
-            if (!system_prompt.empty() && role != "model") {
-                ss << system_prompt << "\n\n";
-                system_prompt = "";
-            }
-            ss << trim(message->content) << "<end_of_turn>\n";
-        }
-        if (add_ass) {
-            ss << "<start_of_turn>model\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) {
-        // OrionStarAI/Orion-14B-Chat
-        std::string system_prompt = "";
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                // there is no system message support, we will merge it with user prompt
-                system_prompt = message->content;
-                continue;
-            } else if (role == "user") {
-                ss << "Human: ";
-                if (!system_prompt.empty()) {
-                    ss << system_prompt << "\n\n";
-                    system_prompt = "";
-                }
-                ss << message->content << "\n\nAssistant: </s>";
-            } else {
-                ss << message->content << "</s>";
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) {
-        // openchat/openchat-3.5-0106,
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << message->content << "<|end_of_turn|>";
-            } else {
-                role[0] = toupper(role[0]);
-                ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
-            }
-        }
-        if (add_ass) {
-            ss << "GPT4 Correct Assistant:";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
-        // eachadea/vicuna-13b-1.1 (and Orca variant)
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                // Orca-Vicuna variant uses a system prefix
-                if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
-                    ss << "SYSTEM: " << message->content << "\n";
-                } else {
-                    ss << message->content << "\n\n";
-                }
-            } else if (role == "user") {
-                ss << "USER: " << message->content << "\n";
-            } else if (role == "assistant") {
-                ss << "ASSISTANT: " << message->content << "\n";
-            }
-        }
-        if (add_ass) {
-            ss << "ASSISTANT:";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) {
-        // deepseek-ai/deepseek-coder-33b-instruct
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << message->content;
-            } else if (role == "user") {
-                ss << "### Instruction:\n" << message->content << "\n";
-            } else if (role == "assistant") {
-                ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
-            }
-        }
-        if (add_ass) {
-            ss << "### Response:\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) {
-        // CohereForAI/c4ai-command-r-plus
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
-            } else if (role == "user") {
-                ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
-            } else if (role == "assistant") {
-                ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
-            }
-        }
-        if (add_ass) {
-            ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) {
-        // Llama 3
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
-        }
-        if (add_ass) {
-            ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) {
-        // chatglm3-6b
-        ss << "[gMASK]" << "sop";
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>" << "\n " << message->content;
-        }
-        if (add_ass) {
-            ss << "<|assistant|>";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
-        ss << "[gMASK]" << "<sop>";
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>" << "\n" << message->content;
-        }
-        if (add_ass) {
-            ss << "<|assistant|>";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
-        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "user") {
-                ss << LU8("<用户>");
-                ss << trim(message->content);
-                ss << "<AI>";
-            } else {
-                ss << trim(message->content);
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) {
-        // DeepSeek-V2
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << message->content << "\n\n";
-            } else if (role == "user") {
-                ss << "User: " << message->content << "\n\n";
-            } else if (role == "assistant") {
-                ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
-            }
-        }
-        if (add_ass) {
-            ss << "Assistant:";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
-        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
-        // EXAONE-3.0-7.8B-Instruct
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
-            } else if (role == "user") {
-                ss << "[|user|]" << trim(message->content) << "\n";
-            } else if (role == "assistant") {
-                ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
-            }
-        }
-        if (add_ass) {
-            ss << "[|assistant|]";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
-        // this template requires the model to have "\n\n" as EOT token
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "user") {
-                ss << "User: " << message->content << "\n\nAssistant:";
-            } else {
-                ss << message->content << "\n\n";
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
-        // IBM Granite template
-        for (const auto & message : chat) {
-            std::string role(message->role);
-            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
-            if (role == "assistant_tool_call") {
-                ss << "<|tool_call|>";
-            }
-            ss << message->content << "<|end_of_text|>\n";
-        }
-        if (add_ass) {
-            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) {
-        // GigaChat template
-        bool has_system = !chat.empty() && std::string(chat[0]->role) == "system";
-
-        // Handle system message if present
-        if (has_system) {
-            ss << "<s>" << chat[0]->content << "<|message_sep|>";
-        } else {
-            ss << "<s>";
-        }
-
-        // Process remaining messages
-        for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) {
-            std::string role(chat[i]->role);
-            if (role == "user") {
-                ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>"
-                << "available functions<|role_sep|>[]<|message_sep|>";
-            } else if (role == "assistant") {
-                ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>";
-            }
-        }
-
-        // Add generation prompt if needed
-        if (add_ass) {
-            ss << "assistant<|role_sep|>";
-        }
-    }  else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) {
-        // Megrez template
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>";
-        }
-
-        if (add_ass) {
-            ss << "<|role_start|>assistant<|role_end|>";
-        }
-    } else {
-        // template not supported
-        return -1;
-    }
-    dest = ss.str();
-    return dest.size();
-}
-
 int32_t llama_chat_apply_template(
                 const struct llama_model * model,
                               const char * tmpl,
@@ -23333,7 +12179,7 @@ int32_t llama_chat_apply_template(
         }
         else {
             // worst case: there is no information about template, we will use chatml by default
-            curr_tmpl = "chatml";  // see llama_chat_apply_template_internal
+            curr_tmpl = "chatml";  // see llm_chat_apply_template
         }
     }
 
@@ -23345,11 +12191,11 @@ int32_t llama_chat_apply_template(
     }
 
     std::string formatted_chat;
-    llm_chat_template detected_tmpl = llama_chat_detect_template(curr_tmpl);
+    llm_chat_template detected_tmpl = llm_chat_detect_template(curr_tmpl);
     if (detected_tmpl == LLM_CHAT_TEMPLATE_UNKNOWN) {
         return -1;
     }
-    int32_t res = llama_chat_apply_template_internal(detected_tmpl, chat_vec, formatted_chat, add_ass);
+    int32_t res = llm_chat_apply_template(detected_tmpl, chat_vec, formatted_chat, add_ass);
     if (res < 0) {
         return res;
     }
@@ -23359,15 +12205,6 @@ int32_t llama_chat_apply_template(
     return res;
 }
 
-int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
-    auto it = LLM_CHAT_TEMPLATES.begin();
-    for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
-        output[i] = it->first.c_str();
-        std::advance(it, 1);
-    }
-    return (int32_t) LLM_CHAT_TEMPLATES.size();
-}
-
 //
 // sampling
 //
@@ -23435,6 +12272,10 @@ const char * llama_print_system_info(void) {
     return s.c_str();
 }
 
+//
+// perf
+//
+
 struct llama_perf_context_data llama_perf_context(const struct llama_context * ctx) {
     struct llama_perf_context_data data = {};
 
@@ -23470,47 +12311,3 @@ void llama_perf_context_reset(struct llama_context * ctx) {
     ctx->t_eval_us   = ctx->n_eval = 0;
     ctx->t_p_eval_us = ctx->n_p_eval = 0;
 }
-
-// For internal test use
-const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
-    struct llama_context * ctx
-) {
-    return ctx->model.tensors_by_name;
-}
-
-void llama_log_set(ggml_log_callback log_callback, void * user_data) {
-    ggml_log_set(log_callback, user_data);
-    g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
-    g_logger_state.log_callback_user_data = user_data;
-}
-
-static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
-    va_list args_copy;
-    va_copy(args_copy, args);
-    char buffer[128];
-    int len = vsnprintf(buffer, 128, format, args);
-    if (len < 128) {
-        g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data);
-    } else {
-        char * buffer2 = new char[len + 1];
-        vsnprintf(buffer2, len + 1, format, args_copy);
-        buffer2[len] = 0;
-        g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data);
-        delete[] buffer2;
-    }
-    va_end(args_copy);
-}
-
-void llama_log_internal(ggml_log_level level, const char * format, ...) {
-    va_list args;
-    va_start(args, format);
-    llama_log_internal_v(level, format, args);
-    va_end(args);
-}
-
-void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
-    (void) level;
-    (void) user_data;
-    fputs(text, stderr);
-    fflush(stderr);
-}

From e7da954eccdf39ee795a6135bdb86f0978902681 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Fri, 3 Jan 2025 11:26:14 +0200
Subject: [PATCH 35/81] metal : avoid uint (#11019)

---
 ggml/src/ggml-metal/ggml-metal.m | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m
index 28f590f92..a85502ee0 100644
--- a/ggml/src/ggml-metal/ggml-metal.m
+++ b/ggml/src/ggml-metal/ggml-metal.m
@@ -2067,8 +2067,8 @@ static void ggml_metal_encode_node(
                 GGML_ASSERT(ne12 % ne02 == 0);
                 GGML_ASSERT(ne13 % ne03 == 0);
 
-                const uint r2 = ne12/ne02;
-                const uint r3 = ne13/ne03;
+                const uint32_t r2 = ne12/ne02;
+                const uint32_t r3 = ne13/ne03;
 
                 // find the break-even point where the matrix-matrix kernel becomes more efficient compared
                 // to the matrix-vector kernel

From 4b0c638b9a68f577cb2066b638c9f622d91ee661 Mon Sep 17 00:00:00 2001
From: Molly Sophia 
Date: Fri, 3 Jan 2025 20:13:18 +0800
Subject: [PATCH 36/81] common : disable KV cache shifting automatically for
 unsupported models (#11053)

* Disable KV cache shifting automatically for unsupported models

instead of exiting directly

Signed-off-by: Molly Sophia 

* Update common/common.cpp

Co-authored-by: Georgi Gerganov 

---------

Signed-off-by: Molly Sophia 
Co-authored-by: Georgi Gerganov 
---
 common/common.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 3e37039ca..4bb140ee2 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -889,9 +889,8 @@ struct common_init_result common_init_from_params(common_params & params) {
     }
 
     if (params.ctx_shift && !llama_kv_cache_can_shift(lctx)) {
-        LOG_ERR("%s: KV cache shifting is not supported for this model (--no-context-shift to disable)'\n", __func__);
-        llama_free_model(model);
-        return iparams;
+        LOG_WRN("%s: KV cache shifting is not supported for this model, disabling KV cache shifting\n", __func__);
+        params.ctx_shift = false;
     }
 
     if (!params.control_vectors.empty()) {
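
For context, an application embedding llama.cpp can perform the same capability
check on its own side before enabling context shifting. A minimal sketch,
assuming a valid llama_context pointer and using the llama_kv_cache_can_shift()
API referenced in the hunk above; the helper name and the ctx_shift flag are
illustrative, not part of the library:

    #include <cstdio>
    #include "llama.h"

    // Downgrade an unsupported ctx_shift request to a warning instead of
    // aborting, mirroring the behavior introduced by this patch.
    static void maybe_disable_ctx_shift(struct llama_context * ctx, bool & ctx_shift) {
        if (ctx_shift && !llama_kv_cache_can_shift(ctx)) {
            fprintf(stderr, "KV cache shifting is not supported for this model, disabling it\n");
            ctx_shift = false;
        }
    }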

From c31fc8b966817b2f0b277fd28e04a189e388972a Mon Sep 17 00:00:00 2001
From: "Gilad S." <7817232+giladgd@users.noreply.github.com>
Date: Sat, 4 Jan 2025 10:17:31 +0200
Subject: [PATCH 37/81] fix: Vulkan shader gen binary path (#11037)

---
 ggml/src/ggml-vulkan/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt
index 6d46e5f24..9501de736 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -73,7 +73,7 @@ if (Vulkan_FOUND)
         OUTPUT ${_ggml_vk_header}
                 ${_ggml_vk_source}
 
-        COMMAND ${_ggml_vk_genshaders_cmd}
+        COMMAND "$<TARGET_FILE_DIR:vulkan-shaders-gen>/${_ggml_vk_genshaders_cmd}"
             --glslc      ${Vulkan_GLSLC_EXECUTABLE}
             --input-dir  ${_ggml_vk_input_dir}
             --output-dir ${_ggml_vk_output_dir}

From db68c93b57bfdf6da1fbdae81080382d6998cbc9 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius 
Date: Thu, 19 Dec 2024 03:50:12 +0100
Subject: [PATCH 38/81] ggml : improve inputs log sched_print_assignments
 (ggml/1053)

This commit attempts to improve the log message for the inputs of the
splits in the sched_print_assignments function.

The motivation for this change is that currently a colon is printed at
the end of the line even when there are no inputs, which can be
confusing when reading the output: it suggests that the lines below are
inputs when they are in fact nodes. With this change the colon is only
printed if there actually are inputs.
---
 ggml/src/ggml-backend.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index fdb4b986f..e2d6c4056 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -795,9 +795,12 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
     for (int i = 0; i < graph->n_nodes; i++) {
         if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
             ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
-            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
+            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend),
                 sched->splits[cur_split].n_inputs);
             for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
+                if (j == 0) {
+                    GGML_LOG_DEBUG(": ");
+                }
                 GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
                     fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
             }
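
The conditional-separator pattern in the hunk above generalizes: emit the ": "
prefix only before the first element, so an empty list never ends in a dangling
colon. A small self-contained sketch of the same idea (the function and its
output format are illustrative, not part of ggml):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Print "N inputs" and append ": a b c " only when the list is non-empty,
    // so an entry with zero inputs does not end in a dangling colon.
    static void print_inputs(const std::vector<std::string> & inputs) {
        printf("%zu inputs", inputs.size());
        for (size_t j = 0; j < inputs.size(); j++) {
            if (j == 0) {
                printf(": ");
            }
            printf("%s ", inputs[j].c_str());
        }
        printf("\n");
    }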

From 5e3b08d606b5b0caaea16541b504c3bba8f3ec1d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Sat, 4 Jan 2025 10:53:54 +0200
Subject: [PATCH 39/81] ggml : do not install metal source when embed library
 (ggml/1054)

---
 ggml/CMakeLists.txt                | 20 --------------------
 ggml/src/ggml-metal/CMakeLists.txt | 16 ++++++++++++++++
 2 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index e33d97482..393506533 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -252,26 +252,6 @@ set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
 install(TARGETS ggml LIBRARY PUBLIC_HEADER)
 install(TARGETS ggml-base LIBRARY)
 
-# FIXME: this should be done in the backend cmake files
-if (GGML_METAL)
-    # FIXME: does this need to be installed with GGML_METAL_EMBED_LIBRARY?
-    install(
-        FILES src/ggml-metal/ggml-metal.metal
-        PERMISSIONS
-            OWNER_READ
-            OWNER_WRITE
-            GROUP_READ
-            WORLD_READ
-        DESTINATION ${CMAKE_INSTALL_BINDIR})
-
-    if (NOT GGML_METAL_EMBED_LIBRARY)
-        install(
-            FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
-            DESTINATION ${CMAKE_INSTALL_BINDIR}
-        )
-    endif()
-endif()
-
 if (GGML_STANDALONE)
     configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ggml.pc.in
         ${CMAKE_CURRENT_BINARY_DIR}/ggml.pc
diff --git a/ggml/src/ggml-metal/CMakeLists.txt b/ggml/src/ggml-metal/CMakeLists.txt
index 1bad27206..89fcde2fa 100644
--- a/ggml/src/ggml-metal/CMakeLists.txt
+++ b/ggml/src/ggml-metal/CMakeLists.txt
@@ -103,3 +103,19 @@ else()
         DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
         )
 endif() # GGML_METAL_EMBED_LIBRARY
+
+if (NOT GGML_METAL_EMBED_LIBRARY)
+    install(
+        FILES src/ggml-metal/ggml-metal.metal
+        PERMISSIONS
+            OWNER_READ
+            OWNER_WRITE
+            GROUP_READ
+            WORLD_READ
+        DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+        install(
+            FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+            DESTINATION ${CMAKE_INSTALL_BINDIR}
+        )
+endif()

From 78c678517530d411b4263341cdb4dc28c9d117c8 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Sat, 4 Jan 2025 10:54:01 +0200
Subject: [PATCH 40/81] sync : ggml

---
 scripts/sync-ggml.last | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last
index b4ac38bbf..b67445ecd 100644
--- a/scripts/sync-ggml.last
+++ b/scripts/sync-ggml.last
@@ -1 +1 @@
-e6d93f40dffe8733d5d72f1d8fa6b3ca27ae899f
+a2af72be7baf5b1f4a33d34e77e509e5e85b7cd7

From 46be942214e295cd34660bbbd6b846155d1c36a0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?DAN=E2=84=A2?= 
Date: Sat, 4 Jan 2025 09:33:31 -0500
Subject: [PATCH 41/81] llama : add support for the cohere2 model architecture
 (#10900)

---
 convert_hf_to_gguf.py     |  18 +++++
 gguf-py/gguf/constants.py |  14 ++++
 src/llama-arch.cpp        |  16 ++++
 src/llama-arch.h          |   1 +
 src/llama-model.cpp       |  11 +++
 src/llama.cpp             | 161 ++++++++++++++++++++++++++++++++++++++
 6 files changed, 221 insertions(+)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 4e6c0f60c..d4441bbe9 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -3373,6 +3373,24 @@ class CommandR2Model(Model):
         self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
 
 
+@Model.register("Cohere2ForCausalLM")
+class Cohere2Model(Model):
+    model_arch = gguf.MODEL_ARCH.COHERE2
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+
+        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
+        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
+        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
+
+        rotary_pct = self.hparams["rotary_pct"]
+        hidden_size = self.hparams["hidden_size"]
+        num_attention_heads = self.hparams["num_attention_heads"]
+        self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
+        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+
+
 @Model.register("OlmoForCausalLM")
 @Model.register("OLMoForCausalLM")
 class OlmoModel(Model):
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 273370370..cdf79673b 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -255,6 +255,7 @@ class MODEL_ARCH(IntEnum):
     MAMBA            = auto()
     XVERSE           = auto()
     COMMAND_R        = auto()
+    COHERE2          = auto()
     DBRX             = auto()
     OLMO             = auto()
     OLMO2            = auto()
@@ -437,6 +438,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.MAMBA:            "mamba",
     MODEL_ARCH.XVERSE:           "xverse",
     MODEL_ARCH.COMMAND_R:        "command-r",
+    MODEL_ARCH.COHERE2:          "cohere2",
     MODEL_ARCH.DBRX:             "dbrx",
     MODEL_ARCH.OLMO:             "olmo",
     MODEL_ARCH.OLMO2:            "olmo2",
@@ -1136,6 +1138,18 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.ATTN_K_NORM,
         MODEL_TENSOR.ATTN_Q_NORM,
     ],
+    MODEL_ARCH.COHERE2: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+    ],
     MODEL_ARCH.DBRX: [
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index a60038385..fea4b21d3 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -39,6 +39,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_MAMBA,            "mamba"            },
     { LLM_ARCH_XVERSE,           "xverse"           },
     { LLM_ARCH_COMMAND_R,        "command-r"        },
+    { LLM_ARCH_COHERE2,          "cohere2"          },
     { LLM_ARCH_DBRX,             "dbrx"             },
     { LLM_ARCH_OLMO,             "olmo"             },
     { LLM_ARCH_OLMO2,            "olmo2"            },
@@ -807,6 +808,21 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
         },
     },
+    {
+        LLM_ARCH_COHERE2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_DBRX,
         {
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 446e72eeb..10bd619a4 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -43,6 +43,7 @@ enum llm_arch {
     LLM_ARCH_MAMBA,
     LLM_ARCH_XVERSE,
     LLM_ARCH_COMMAND_R,
+    LLM_ARCH_COHERE2,
     LLM_ARCH_DBRX,
     LLM_ARCH_OLMO,
     LLM_ARCH_OLMO2,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index ace0ba262..c356abded 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -786,6 +786,16 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) {
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
             } break;
+        case LLM_ARCH_COHERE2:
+            {
+                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
+                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                switch (hparams.n_layer) {
+                    case 32: model.type = e_model::MODEL_8B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_DBRX:
         {
             ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
@@ -2031,6 +2041,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_MINICPM:
         case LLM_ARCH_XVERSE:
         case LLM_ARCH_COMMAND_R:
+        case LLM_ARCH_COHERE2:
         case LLM_ARCH_OLMO:
         case LLM_ARCH_ARCTIC:
         case LLM_ARCH_DEEPSEEK:
diff --git a/src/llama.cpp b/src/llama.cpp
index d7110b90b..50e9191fa 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1552,6 +1552,32 @@ static bool llm_load_tensors(
                         layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
                     }
                 } break;
+            case LLM_ARCH_COHERE2:
+                {
+                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
+
+                    // output
+                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
+                    // init output from the input tok embed
+                    model.output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab },
+                                                      llama_model_loader::TENSOR_DUPLICATED);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = model.layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
+
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
+                    }
+                }
+                break;
             case LLM_ARCH_OLMO:  // adapted from LLM_ARCH_LLAMA with norm params removed
                 {
                     model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -7633,6 +7659,137 @@ struct llm_build_context {
 
     }
 
+    struct ggml_cgraph * build_cohere2() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        const float f_logit_scale = hparams.f_logit_scale;
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        // cohere2 requires different mask for layers using sliding window (SWA)
+        struct ggml_tensor * KQ_mask     = build_inp_KQ_mask();
+        struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+
+        // sliding window switch pattern
+        const int32_t sliding_window_pattern = 4;
+
+        for (int il = 0; il < n_layer; ++il) {
+            // three layers sliding window attention (window size 4096) and ROPE
+            // fourth layer uses global attention without positional embeddings
+            const bool           is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1);
+            struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask;
+
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM, cb, il);
+            cb(cur, "attn_norm", il);
+            struct ggml_tensor * ffn_inp = cur;
+
+            // self-attention
+            {
+                // rope freq factors for 128k context
+                struct ggml_tensor * rope_factors = build_rope_factors(il);
+
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                if (is_sliding) {
+                    Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
+                                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor,
+                                        beta_fast, beta_slow);
+                    cb(Qcur, "Qcur", il);
+
+                    Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+                                        rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
+                                        attn_factor, beta_fast, beta_slow);
+                    cb(Kcur, "Kcur", il);
+                } else {
+                    // For non-sliding layers, just reshape without applying RoPE
+                    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+                    cb(Qcur, "Qcur", il);
+
+                    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                cur = llm_build_kv(ctx0, lctx, kv_self, gf, model.layers[il].wo, model.layers[il].bo, Kcur, Vcur, Qcur,
+                                   KQ_mask_l, n_tokens, kv_head, n_kv, 1.0f / sqrtf(float(n_embd_head)), cb, il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur                              = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpL                             = ggml_get_rows(ctx0, inpL, inp_out_ids);
+                ffn_inp                          = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
+            }
+
+            struct ggml_tensor * attn_out = cur;
+
+            // feed-forward network
+            {
+                cur = llm_build_ffn(ctx0, lctx, ffn_inp, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate,
+                                    NULL, NULL, model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR,
+                                    cb, il);
+                cb(cur, "ffn_out", il);
+            }
+
+            // add together residual + FFN + self-attention
+            cur = ggml_add(ctx0, cur, inpL);
+            cur = ggml_add(ctx0, cur, attn_out);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+
+        if (f_logit_scale) {
+            cur = ggml_scale(ctx0, cur, f_logit_scale);
+        }
+
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
     // ref: https://allenai.org/olmo
     // based on the original build_llama() function, changes:
     //   * non-parametric layer norm
@@ -10384,6 +10541,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_command_r();
             } break;
+        case LLM_ARCH_COHERE2:
+            {
+                result = llm.build_cohere2();
+            } break;
         case LLM_ARCH_DBRX:
             {
                 result = llm.build_dbrx();
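
The sliding-window selection in build_cohere2() above can be illustrated in
isolation: with sliding_window_pattern = 4, the first three layers of every
group of four use sliding-window attention with RoPE, and the fourth uses
global attention without positional embeddings. A standalone sketch of that
predicate, with an illustrative layer count:

    #include <cstdio>

    int main() {
        const int n_layer = 8;                 // illustrative layer count
        const int sliding_window_pattern = 4;  // as in build_cohere2()
        for (int il = 0; il < n_layer; ++il) {
            // layers 0..2 of each group of 4 are sliding, layer 3 is global
            const bool is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1);
            printf("layer %d: %s\n", il, is_sliding ? "SWA + RoPE" : "global attention, no RoPE");
        }
        return 0;
    }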

From f922a9c542ee117550a168395c63ea79261f5c99 Mon Sep 17 00:00:00 2001
From: matt23654 
Date: Sat, 4 Jan 2025 16:10:30 +0000
Subject: [PATCH 42/81] [GGML][RPC] Support for models with non-512-aligned
 tensors over RPC. (#11047)

* Added init tensor calling code

* Added get_alloc_size forwarding

* Cleaned up and improved type/error handling.

* fix: remove trailing whitespaces.

* Cleanup and use GGML error logging functions.

* Handle potentially dangerous edge cases.

* Apply suggestions from code review

Co-authored-by: Diego Devesa 

---------

Co-authored-by: Diego Devesa 
---
 ggml/src/ggml-rpc/ggml-rpc.cpp | 140 +++++++++++++++++++++++++++++++--
 1 file changed, 134 insertions(+), 6 deletions(-)

diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp
index 431082426..2213aba9f 100644
--- a/ggml/src/ggml-rpc/ggml-rpc.cpp
+++ b/ggml/src/ggml-rpc/ggml-rpc.cpp
@@ -93,9 +93,23 @@ enum rpc_cmd {
     RPC_CMD_COPY_TENSOR,
     RPC_CMD_GRAPH_COMPUTE,
     RPC_CMD_GET_DEVICE_MEMORY,
+    RPC_CMD_INIT_TENSOR,
+    RPC_CMD_GET_ALLOC_SIZE,
     RPC_CMD_COUNT,
 };
 
+struct rpc_msg_get_alloc_size_req {
+    rpc_tensor tensor;
+};
+
+struct rpc_msg_get_alloc_size_rsp {
+    uint64_t alloc_size;
+};
+
+struct rpc_msg_init_tensor_req {
+    rpc_tensor tensor;
+};
+
 struct rpc_msg_alloc_buffer_req {
     uint64_t size;
 };
@@ -461,10 +475,18 @@ static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
 }
 
 static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
-    UNUSED(buffer);
-    if (ggml_is_quantized(tensor->type)) {
-        // TODO: this check is due to MATRIX_ROW_PADDING in CUDA and should be generalized
-        GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor");
+    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
+
+    // CUDA backend on the server pads everything to 512 due to CUDA limitations.
+    // Due to bandwidth constraints, we only call the server init tensor functions if necessary.
+    // In particular, only quantized tensors need padding
+    if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) {
+        rpc_msg_init_tensor_req request;
+
+        request.tensor = serialize_tensor(tensor);
+
+        bool status = send_rpc_cmd(ctx->sock, RPC_CMD_INIT_TENSOR, &request, sizeof(request), nullptr, 0);
+        GGML_ASSERT(status);
     }
 }
 
@@ -577,8 +599,23 @@ static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) {
 }
 
 static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
-    UNUSED(buft);
-    return ggml_nbytes(tensor);
+    // See comments in init_tensor.
+    if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) {
+        ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
+        auto sock = get_socket(buft_ctx->endpoint);
+
+        rpc_msg_get_alloc_size_req request;
+
+        request.tensor = serialize_tensor(tensor);
+
+        rpc_msg_get_alloc_size_rsp response;
+        bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALLOC_SIZE, &request, sizeof(request), &response, sizeof(response));
+        GGML_ASSERT(status);
+
+        return response.alloc_size;
+    } else {
+        return ggml_nbytes(tensor);
+    }
 }
 
 static ggml_backend_buffer_type_i ggml_backend_rpc_buffer_type_interface = {
@@ -757,6 +794,8 @@ public:
     bool get_tensor(const rpc_msg_get_tensor_req & request, std::vector<uint8_t> & response);
     bool copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_copy_tensor_rsp & response);
     bool graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph_compute_rsp & response);
+    bool init_tensor(const rpc_msg_init_tensor_req & request);
+    bool get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response);
 
 private:
     ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor);
@@ -770,6 +809,36 @@ private:
     std::unordered_set<ggml_backend_buffer_t> buffers;
 };
 
+bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response) {
+    ggml_backend_buffer_type_t buft;
+    struct ggml_init_params params {
+        /*.mem_size   =*/ ggml_tensor_overhead(),
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ true,
+    };
+
+    struct ggml_context * ctx = ggml_init(params);
+    ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
+
+    if (tensor == nullptr) {
+        GGML_LOG_ERROR("Null tensor pointer passed to server get_alloc_size function.\n");
+        ggml_free(ctx);
+        return false;
+    }
+
+    if (tensor->buffer == nullptr) {
+        //No buffer allocated.
+        buft = ggml_backend_get_default_buffer_type(backend);
+    } else {
+        buft = tensor->buffer->buft;
+    }
+
+    response.alloc_size = ggml_backend_buft_get_alloc_size(buft,tensor);
+
+    ggml_free(ctx);
+    return true;
+}
+
 void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, request.size);
@@ -905,6 +974,40 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
     return true;
 }
 
+bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) {
+    struct ggml_init_params params {
+        /*.mem_size   =*/ ggml_tensor_overhead(),
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ true,
+    };
+    struct ggml_context * ctx = ggml_init(params);
+    ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
+    if (tensor == nullptr) {
+        GGML_LOG_ERROR("Null tensor pointer passed to server init_tensor function.\n");
+        ggml_free(ctx);
+        return false;
+    }
+
+    // Call the backend's buffer_init_tensor function
+    ggml_backend_buffer_t buffer = tensor->buffer;
+    if (buffer && buffer->iface.init_tensor) {
+        buffer->iface.init_tensor(buffer, tensor);
+    } else {
+        GGML_LOG_ERROR("Null buffer for tensor passed to init_tensor function\n");
+    }
+
+    if (tensor->extra != nullptr) {
+        // This pointer can either be passed around client/server, or probably better stored server-side and kept track of.
+        // Currently unimplemented.
+        GGML_LOG_ERROR("tensor->extra populated by the backend, this is currently unsupported.\n");
+        ggml_free(ctx);
+        return false;
+    }
+
+    ggml_free(ctx);
+    return true;
+}
+
 bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<uint8_t> & response) {
     struct ggml_init_params params {
         /*.mem_size   =*/ ggml_tensor_overhead(),
@@ -1058,6 +1161,18 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre
                 }
                 break;
             }
+            case RPC_CMD_GET_ALLOC_SIZE: {
+                rpc_msg_get_alloc_size_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                rpc_msg_get_alloc_size_rsp response;
+                server.get_alloc_size(request, response);
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
+                break;
+            }
             case RPC_CMD_GET_ALIGNMENT: {
                 if (!recv_msg(sockfd, nullptr, 0)) {
                     return;
@@ -1133,6 +1248,19 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre
                 }
                 break;
             }
+            case RPC_CMD_INIT_TENSOR: {
+                rpc_msg_init_tensor_req request;
+                if (!recv_msg(sockfd, &request,sizeof(request))) {
+                    return;
+                }
+                if (!server.init_tensor(request)) {
+                    return;
+                }
+                if (!send_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
+                break;
+            }
             case RPC_CMD_GET_TENSOR: {
                 rpc_msg_get_tensor_req request;
                 if (!recv_msg(sockfd, &request, sizeof(request))) {
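
The client-side logic in the patch above reduces to a single predicate: only
quantized, non-view tensors whose first dimension is not a multiple of 512 need
the extra round-trip to the server; everything else uses the local
ggml_nbytes(). A sketch of just that predicate, with an illustrative function
name that is not part of the RPC backend:

    #include "ggml.h"

    // True when the server must be asked for the allocation size: quantized,
    // not a view, and first dimension not 512-aligned (CUDA row padding).
    static bool rpc_needs_server_alloc_size(const struct ggml_tensor * tensor) {
        return ggml_is_quantized(tensor->type)
            && (tensor->ne[0] % 512 != 0)
            && (tensor->view_src == nullptr);
    }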

From 9394bbd484f802ce80d2858033583af3ef700d25 Mon Sep 17 00:00:00 2001
From: fairydreaming <166155368+fairydreaming@users.noreply.github.com>
Date: Sat, 4 Jan 2025 21:06:11 +0100
Subject: [PATCH 43/81] llama : Add support for DeepSeek V3 (#11049)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* convert : extend DEEPSEEK2 model architecture to support DeepseekV3ForCausalLM by adding EXPERT_WEIGHTS_NORM and EXPERT_GATING_FUNC model parameters and FFN_EXP_PROBS_B tensor type

* vocab : add DeepSeek V3 pre-tokenizer regexes

* unicode : handle ACCENT_MARK and SYMBOL categories in regex

* llama : add DeepSeek V3 chat template, handle new model parameters and tensor types

---------

Co-authored-by: Stanisław Szymczyk 
---
 convert_hf_to_gguf.py          | 23 +++++++++++++++++
 convert_hf_to_gguf_update.py   |  1 +
 gguf-py/gguf/constants.py      | 10 ++++++++
 gguf-py/gguf/gguf_writer.py    |  7 ++++++
 gguf-py/gguf/tensor_mapping.py |  4 +++
 include/llama.h                |  1 +
 src/llama-arch.cpp             |  4 +++
 src/llama-arch.h               |  3 +++
 src/llama-chat.cpp             | 18 ++++++++++++++
 src/llama-chat.h               |  1 +
 src/llama-hparams.h            | 12 +++++++--
 src/llama-model.cpp            | 23 +++++++++++++++++
 src/llama-model.h              |  2 ++
 src/llama-vocab.cpp            |  7 ++++++
 src/llama.cpp                  | 45 +++++++++++++++++++++++++++++++---
 src/unicode.cpp                |  6 +++++
 16 files changed, 162 insertions(+), 5 deletions(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index d4441bbe9..01b58f976 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -687,6 +687,9 @@ class Model:
         if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
             # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
             res = "megrez"
+        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
+            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
+            res = "deepseek-v3"
 
         if res is None:
             logger.warning("\n")
@@ -3849,6 +3852,7 @@ class DeepseekModel(Model):
 
 
 @Model.register("DeepseekV2ForCausalLM")
+@Model.register("DeepseekV3ForCausalLM")
 class DeepseekV2Model(Model):
     model_arch = gguf.MODEL_ARCH.DEEPSEEK2
 
@@ -3870,6 +3874,15 @@ class DeepseekV2Model(Model):
         self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
         self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
         self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
+        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
+
+        if hparams["scoring_func"] == "sigmoid":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
+        elif hparams["scoring_func"] == "softmax":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
+        else:
+            raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")
+
         self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
 
         if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
@@ -3882,6 +3895,16 @@ class DeepseekV2Model(Model):
     _experts: list[dict[str, Tensor]] | None = None
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # rename e_score_correction_bias tensors
+        if name.endswith("e_score_correction_bias"):
+            name = name.replace("e_score_correction_bias", "e_score_correction.bias")
+
+        # skip Multi-Token Prediction (MTP) layers
+        block_count = self.hparams["num_hidden_layers"]
+        match = re.match(r"model.layers.(\d+)", name)
+        if match and int(match.group(1)) >= block_count:
+            return []
+
         # process the experts separately
         if name.find("mlp.experts") != -1:
             n_experts = self.hparams["n_routed_experts"]
diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py
index fea23ddb4..56edc64a7 100755
--- a/convert_hf_to_gguf_update.py
+++ b/convert_hf_to_gguf_update.py
@@ -107,6 +107,7 @@ models = [
     {"name": "roberta-bpe",    "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"},
     {"name": "gigachat",       "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"},
     {"name": "megrez",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"},
+    {"name": "deepseek-v3",    "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-V3"},
 ]
 
 
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index cdf79673b..9d0e7489f 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -102,6 +102,8 @@ class Keys:
         EXPERT_USED_COUNT                 = "{arch}.expert_used_count"
         EXPERT_SHARED_COUNT               = "{arch}.expert_shared_count"
         EXPERT_WEIGHTS_SCALE              = "{arch}.expert_weights_scale"
+        EXPERT_WEIGHTS_NORM               = "{arch}.expert_weights_norm"
+        EXPERT_GATING_FUNC                = "{arch}.expert_gating_func"
         POOLING_TYPE                      = "{arch}.pooling_type"
         LOGIT_SCALE                       = "{arch}.logit_scale"
         DECODER_START_TOKEN_ID            = "{arch}.decoder_start_token_id"
@@ -313,6 +315,7 @@ class MODEL_TENSOR(IntEnum):
     FFN_GATE_SHEXP       = auto()
     FFN_DOWN_SHEXP       = auto()
     FFN_UP_SHEXP         = auto()
+    FFN_EXP_PROBS_B      = auto()
     ATTN_Q_NORM          = auto()
     ATTN_K_NORM          = auto()
     LAYER_OUT_NORM       = auto()
@@ -498,6 +501,7 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
     MODEL_TENSOR.FFN_GATE_EXP:              "blk.{bid}.ffn_gate_exps",
     MODEL_TENSOR.FFN_DOWN_EXP:              "blk.{bid}.ffn_down_exps",
     MODEL_TENSOR.FFN_UP_EXP:                "blk.{bid}.ffn_up_exps",
+    MODEL_TENSOR.FFN_EXP_PROBS_B:           "blk.{bid}.exp_probs_b",
     MODEL_TENSOR.LAYER_OUT_NORM:            "blk.{bid}.layer_output_norm",
     MODEL_TENSOR.SSM_IN:                    "blk.{bid}.ssm_in",
     MODEL_TENSOR.SSM_CONV1D:                "blk.{bid}.ssm_conv1d",
@@ -1290,6 +1294,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_GATE_SHEXP,
         MODEL_TENSOR.FFN_DOWN_SHEXP,
         MODEL_TENSOR.FFN_UP_SHEXP,
+        MODEL_TENSOR.FFN_EXP_PROBS_B,
     ],
     MODEL_ARCH.CHATGLM : [
         MODEL_TENSOR.TOKEN_EMBD,
@@ -1590,6 +1595,11 @@ class GGMLQuantizationType(IntEnum):
     TQ2_0   = 35
 
 
+class ExpertGatingFuncType(IntEnum):
+    SOFTMAX  = 1
+    SIGMOID  = 2
+
+
 # TODO: add GGMLFileType from ggml_ftype in ggml.h
 
 
diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py
index 3023b539a..4a0a65e3c 100644
--- a/gguf-py/gguf/gguf_writer.py
+++ b/gguf-py/gguf/gguf_writer.py
@@ -26,6 +26,7 @@ from .constants import (
     RopeScalingType,
     PoolingType,
     TokenType,
+    ExpertGatingFuncType,
 )
 
 from .quants import quant_shape_from_byte_shape
@@ -715,6 +716,12 @@ class GGUFWriter:
     def add_expert_weights_scale(self, value: float) -> None:
         self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value)
 
+    def add_expert_weights_norm(self, value: bool) -> None:
+        self.add_bool(Keys.LLM.EXPERT_WEIGHTS_NORM.format(arch=self.arch), value)
+
+    def add_expert_gating_func(self, value: ExpertGatingFuncType) -> None:
+        self.add_uint32(Keys.LLM.EXPERT_GATING_FUNC.format(arch=self.arch), value.value)
+
     def add_swin_norm(self, value: bool) -> None:
         self.add_bool(Keys.LLM.SWIN_NORM.format(arch=self.arch), value)
 
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 7009a11d4..efe2a4aa4 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -276,6 +276,10 @@ class TensorNameMap:
             "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe
         ),
 
+        MODEL_TENSOR.FFN_EXP_PROBS_B: (
+            "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3
+        ),
+
         # Feed-forward up
         MODEL_TENSOR.FFN_UP: (
             "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",                # gptneox
diff --git a/include/llama.h b/include/llama.h
index 7b305b299..a0d5ba5dd 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -105,6 +105,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
         LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
         LLAMA_VOCAB_PRE_TYPE_MINERVA        = 27,
+        LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
     };
 
     enum llama_rope_type {
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index fea4b21d3..007d79f82 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -92,6 +92,8 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
     { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
     { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
+    { LLM_KV_EXPERT_WEIGHTS_NORM,               "%s.expert_weights_norm"               },
+    { LLM_KV_EXPERT_GATING_FUNC,                "%s.expert_gating_func"                },
     { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
     { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
     { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
@@ -984,6 +986,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
             { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
             { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
             { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
         },
     },
     {
@@ -1366,6 +1369,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
     {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
     {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+    {LLM_TENSOR_FFN_EXP_PROBS_B,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     // this tensor is loaded for T5, but never used
     {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
     {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 10bd619a4..45e458bb9 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -96,6 +96,8 @@ enum llm_kv {
     LLM_KV_EXPERT_USED_COUNT,
     LLM_KV_EXPERT_SHARED_COUNT,
     LLM_KV_EXPERT_WEIGHTS_SCALE,
+    LLM_KV_EXPERT_WEIGHTS_NORM,
+    LLM_KV_EXPERT_GATING_FUNC,
     LLM_KV_POOLING_TYPE,
     LLM_KV_LOGIT_SCALE,
     LLM_KV_DECODER_START_TOKEN_ID,
@@ -231,6 +233,7 @@ enum llm_tensor {
     LLM_TENSOR_FFN_DOWN_SHEXP,
     LLM_TENSOR_FFN_GATE_SHEXP,
     LLM_TENSOR_FFN_UP_SHEXP,
+    LLM_TENSOR_FFN_EXP_PROBS_B,
     LLM_TENSOR_ATTN_Q_NORM,
     LLM_TENSOR_ATTN_K_NORM,
     LLM_TENSOR_LAYER_OUT_NORM,
diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp
index a07e9cf00..44670d3d8 100644
--- a/src/llama-chat.cpp
+++ b/src/llama-chat.cpp
@@ -45,6 +45,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
     { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
     { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
+    { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
     { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
     { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
     { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGML_3         },
@@ -148,6 +149,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_MINICPM;
     } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
         return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
+    } else if (tmpl_contains(LU8("'<|Assistant|>' + message['content'] + '<|end▁of▁sentence|>'"))) {
+        return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
     } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
@@ -453,6 +456,21 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "Assistant:";
         }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_3) {
+        // DeepSeek-V3
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                ss << message->content << "\n\n";
+            } else if (role == "user") {
+                ss << LU8("<|User|>") << message->content;
+            } else if (role == "assistant") {
+                ss << LU8("<|Assistant|>") << message->content << LU8("<|end▁of▁sentence|>");
+            }
+        }
+        if (add_ass) {
+            ss << LU8("<|Assistant|>");
+        }
     } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
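
Note on the template added above: the DeepSeek-V3 branch emits the system text followed by a blank line, then "<|User|>" + content for user turns and "<|Assistant|>" + content + "<|end▁of▁sentence|>" for assistant turns, with a trailing "<|Assistant|>" when add_ass is set (the literals are wrapped in LU8() only to keep MSVC happy with UTF-8 source). A standalone sketch that mirrors this branch, using plain role/content pairs instead of the llama_chat_message API (the names here are illustrative, not from the patch):

    #include <string>
    #include <utility>
    #include <vector>

    // role/content pairs stand in for llama_chat_message in this illustration
    static std::string format_deepseek3(const std::vector<std::pair<std::string, std::string>> & chat, bool add_ass) {
        std::string ss;
        for (const auto & msg : chat) {
            const std::string & role    = msg.first;
            const std::string & content = msg.second;
            if (role == "system") {
                ss += content + "\n\n";
            } else if (role == "user") {
                ss += "<|User|>" + content;
            } else if (role == "assistant") {
                ss += "<|Assistant|>" + content + "<|end▁of▁sentence|>";
            }
        }
        if (add_ass) {
            ss += "<|Assistant|>"; // prompt the model to answer next
        }
        return ss;
    }
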
diff --git a/src/llama-chat.h b/src/llama-chat.h
index 364318c27..b8e94d9ef 100644
--- a/src/llama-chat.h
+++ b/src/llama-chat.h
@@ -25,6 +25,7 @@ enum llm_chat_template {
     LLM_CHAT_TEMPLATE_VICUNA_ORCA,
     LLM_CHAT_TEMPLATE_DEEPSEEK,
     LLM_CHAT_TEMPLATE_DEEPSEEK_2,
+    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
     LLM_CHAT_TEMPLATE_COMMAND_R,
     LLM_CHAT_TEMPLATE_LLAMA_3,
     LLM_CHAT_TEMPLATE_CHATGML_3,
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index 3a76b71a4..a29f20ec4 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -6,7 +6,13 @@
 
 // bump if necessary
 #define LLAMA_MAX_LAYERS  512
-#define LLAMA_MAX_EXPERTS 160  // DeepSeekV2
+#define LLAMA_MAX_EXPERTS 256  // DeepSeekV3
+
+enum llama_expert_gating_func_type {
+    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE    = 0,
+    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1,
+    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
+};
 
 struct llama_hparams_posnet {
     uint32_t n_embd;
@@ -54,7 +60,9 @@ struct llama_hparams {
     uint32_t n_expert_shared    = 0;
     uint32_t n_norm_groups      = 0;
 
-    float expert_weights_scale = 0.0;
+    float    expert_weights_scale = 0.0;
+    bool     expert_weights_norm  = false;
+    uint32_t expert_gating_func   = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
 
     float f_norm_eps;
     float f_norm_rms_eps;
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index c356abded..405e0528f 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -66,6 +66,7 @@ const char * llm_type_name(llm_type type) {
         case MODEL_70B:           return "70B";
         case MODEL_236B:          return "236B";
         case MODEL_314B:          return "314B";
+        case MODEL_671B:          return "671B";
         case MODEL_SMALL:         return "0.1B";
         case MODEL_MEDIUM:        return "0.4B";
         case MODEL_LARGE:         return "0.8B";
@@ -125,6 +126,14 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
     }
 }
 
+static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
+    switch (type) {
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
+        default:                                    return "unknown";
+    }
+}
+
 std::string llama_model_arch_name (const llama_model & model) {
     return llm_arch_name(model.arch);
 }
@@ -933,11 +942,19 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) {
                 ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
                 ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
                 ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
+                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
+                ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
+                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
+                    // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
+                    // that have no expert_gating_func model parameter set
+                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
+                }
                 ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
 
                 switch (hparams.n_layer) {
                     case 27: model.type = e_model::MODEL_16B; break;
                     case 60: model.type = e_model::MODEL_236B; break;
+                    case 61: model.type = e_model::MODEL_671B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
             } break;
@@ -1259,6 +1276,10 @@ void llm_load_vocab(llama_model_loader & ml, llama_model & model) {
                     tokenizer_pre == "deepseek-coder") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
                 vocab.tokenizer_clean_spaces = false;
+            } else if (
+                    tokenizer_pre == "deepseek-v3") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "falcon") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
@@ -1941,6 +1962,8 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
         LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
         LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
+        LLAMA_LOG_INFO("%s: expert_weights_norm  = %d\n",     __func__, hparams.expert_weights_norm);
+        LLAMA_LOG_INFO("%s: expert_gating_func   = %s\n",     __func__, llama_expert_gating_func_name((enum llama_expert_gating_func_type) hparams.expert_gating_func));
         LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
     }
 
diff --git a/src/llama-model.h b/src/llama-model.h
index 01c780c41..ce038932d 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -63,6 +63,7 @@ enum llm_type {
     MODEL_70B,
     MODEL_236B,
     MODEL_314B,
+    MODEL_671B,
     MODEL_SMALL,
     MODEL_MEDIUM,
     MODEL_LARGE,
@@ -213,6 +214,7 @@ struct llama_layer {
     struct ggml_tensor * ffn_down_b = nullptr; // b2
     struct ggml_tensor * ffn_up_b   = nullptr; // b3
     struct ggml_tensor * ffn_act    = nullptr;
+    struct ggml_tensor * ffn_exp_probs_b = nullptr;
 
     // mamba proj
     struct ggml_tensor * ssm_in  = nullptr;
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 909e04871..3fcfcaa3f 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -382,6 +382,13 @@ struct llm_tokenizer_bpe : llm_tokenizer {
                     "\\p{N}+",
                 };
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM:
+                regex_exprs = {
+                    "\\p{N}{1,3}",
+                    "[一-龥぀-ゟ゠-ヿ]+",
+                    "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
+                };
+                break;
             case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
                 regex_exprs = {
                     "[\r\n]",
diff --git a/src/llama.cpp b/src/llama.cpp
index 50e9191fa..ea78ea487 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1857,6 +1857,7 @@ static bool llm_load_tensors(
                             layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
                         } else {
                             layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
 
                             if (n_expert == 0) {
                                 throw std::runtime_error("n_expert must be > 0");
@@ -2837,12 +2838,14 @@ static struct ggml_tensor * llm_build_moe_ffn(
          struct ggml_tensor * up_exps,
          struct ggml_tensor * gate_exps,
          struct ggml_tensor * down_exps,
+         struct ggml_tensor * exp_probs_b,
                     int64_t   n_expert,
                     int64_t   n_expert_used,
             llm_ffn_op_type   type_op,
                        bool   norm_w,
                        bool   scale_w,
                       float   w_scale,
+llama_expert_gating_func_type gating_op,
          const llm_build_cb & cb,
                         int   il) {
     int64_t n_embd = cur->ne[0];
@@ -2851,11 +2854,31 @@ static struct ggml_tensor * llm_build_moe_ffn(
     ggml_tensor * logits = llm_build_lora_mm(lctx, ctx, gate_inp, cur); // [n_expert, n_tokens]
     cb(logits, "ffn_moe_logits", il);
 
-    ggml_tensor * probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
+    ggml_tensor * probs = nullptr;
+    switch (gating_op) {
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
+            {
+                probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
+            } break;
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
+            {
+                probs = ggml_sigmoid(ctx, logits); // [n_expert, n_tokens]
+            } break;
+        default:
+            GGML_ABORT("fatal error");
+    }
     cb(probs, "ffn_moe_probs", il);
 
+    // add experts selection bias - introduced in DeepSeek V3
+    // leave probs unbiased as it's later used to get expert weights
+    ggml_tensor * selection_probs = probs;
+    if (exp_probs_b != nullptr) {
+        selection_probs = ggml_add(ctx, probs, exp_probs_b);
+        cb(selection_probs, "ffn_moe_probs_biased", il);
+    }
+
     // select experts
-    ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens]
+    ggml_tensor * selected_experts = ggml_top_k(ctx, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
     cb(selected_experts->src[0], "ffn_moe_argsort", il);
     cb(selected_experts, "ffn_moe_topk", il);
 
@@ -3976,9 +3999,11 @@ struct llm_build_context {
                         model.layers[il].ffn_up_exps,
                         model.layers[il].ffn_gate_exps,
                         model.layers[il].ffn_down_exps,
+                        nullptr,
                         n_expert, n_expert_used,
                         LLM_FFN_SILU, true,
                         false, 0.0,
+                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                         cb, il);
                 cb(cur, "ffn_moe_out", il);
             }
@@ -4628,9 +4653,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_GELU, true,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -4769,9 +4796,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -6017,9 +6046,11 @@ struct llm_build_context {
                         model.layers[il].ffn_up_exps,
                         model.layers[il].ffn_gate_exps,
                         model.layers[il].ffn_down_exps,
+                        nullptr,
                         n_expert, n_expert_used,
                         LLM_FFN_SILU, false,
                         false, 0.0,
+                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                         cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -8142,9 +8173,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, false,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -8539,9 +8572,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -8680,9 +8715,11 @@ struct llm_build_context {
                             model.layers[il].ffn_up_exps,
                             model.layers[il].ffn_gate_exps,
                             model.layers[il].ffn_down_exps,
+                            nullptr,
                             n_expert, n_expert_used,
                             LLM_FFN_SILU, false,
                             false, hparams.expert_weights_scale,
+                            LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                             cb, il);
                 cb(moe_out, "ffn_moe_out", il);
 
@@ -8909,9 +8946,11 @@ struct llm_build_context {
                             model.layers[il].ffn_up_exps,
                             model.layers[il].ffn_gate_exps,
                             model.layers[il].ffn_down_exps,
+                            model.layers[il].ffn_exp_probs_b,
                             n_expert, n_expert_used,
-                            LLM_FFN_SILU, false,
+                            LLM_FFN_SILU, hparams.expert_weights_norm,
                             true, hparams.expert_weights_scale,
+                            (enum llama_expert_gating_func_type) hparams.expert_gating_func,
                             cb, il);
                 cb(moe_out, "ffn_moe_out", il);
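
Note on the gating change above: DeepSeek-V3 switches the router from softmax to sigmoid gating, adds a learned per-expert bias (exp_probs_b) that is used only to pick the top-k experts, and keeps the unbiased probabilities as the expert weights (optionally renormalized via expert_weights_norm and scaled by expert_weights_scale). A scalar, single-token sketch of that selection logic follows; it illustrates the graph built by llm_build_moe_ffn rather than reproducing it, and the function name and the unconditional scaling are assumptions (the real helper gates scaling on a separate scale_w flag and works on [n_expert, n_tokens] tensors with ggml ops):

    #include <algorithm>
    #include <cmath>
    #include <numeric>
    #include <utility>
    #include <vector>

    // Returns (expert index, weight) pairs for one token.
    static std::vector<std::pair<int, float>> route_token(
            const std::vector<float> & logits,       // one row of ffn_moe_logits
            const std::vector<float> & exp_probs_b,  // empty when the bias tensor is absent
            int   n_expert_used,                     // assumed <= logits.size()
            bool  sigmoid_gating,                    // LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID
            bool  norm_w,
            float w_scale) {
        const size_t n_expert = logits.size();

        // 1. gating probabilities: softmax (DeepSeek-V2) or sigmoid (DeepSeek-V3)
        std::vector<float> probs(n_expert);
        if (sigmoid_gating) {
            for (size_t i = 0; i < n_expert; ++i) { probs[i] = 1.0f/(1.0f + std::exp(-logits[i])); }
        } else {
            const float max_l = *std::max_element(logits.begin(), logits.end());
            float sum = 0.0f;
            for (size_t i = 0; i < n_expert; ++i) { probs[i] = std::exp(logits[i] - max_l); sum += probs[i]; }
            for (float & p : probs) { p /= sum; }
        }

        // 2. bias the scores used for expert *selection* only; weights stay unbiased
        std::vector<float> selection = probs;
        for (size_t i = 0; i < exp_probs_b.size() && i < n_expert; ++i) { selection[i] += exp_probs_b[i]; }

        // 3. top-k experts by the biased selection score
        std::vector<int> idx(n_expert);
        std::iota(idx.begin(), idx.end(), 0);
        std::partial_sort(idx.begin(), idx.begin() + n_expert_used, idx.end(),
                          [&](int a, int b) { return selection[a] > selection[b]; });

        // 4. weights come from the unbiased probs, optionally normalized, then scaled
        float sum_w = 0.0f;
        for (int k = 0; k < n_expert_used; ++k) { sum_w += probs[idx[k]]; }
        std::vector<std::pair<int, float>> out;
        for (int k = 0; k < n_expert_used; ++k) {
            float w = probs[idx[k]];
            if (norm_w) { w /= sum_w; }
            out.emplace_back(idx[k], w*w_scale);
        }
        return out;
    }
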
 
diff --git a/src/unicode.cpp b/src/unicode.cpp
index 8ed6b1a51..7aca6544b 100644
--- a/src/unicode.cpp
+++ b/src/unicode.cpp
@@ -667,18 +667,24 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) {
         { "\\p{N}", unicode_cpt_flags::NUMBER },
         { "\\p{L}", unicode_cpt_flags::LETTER },
         { "\\p{P}", unicode_cpt_flags::PUNCTUATION },
+        { "\\p{M}", unicode_cpt_flags::ACCENT_MARK },
+        { "\\p{S}", unicode_cpt_flags::SYMBOL },
     };
 
     static const std::map<int, int> k_ucat_cpt = {
         { unicode_cpt_flags::NUMBER,      0xD1 },
         { unicode_cpt_flags::LETTER,      0xD2 },
         { unicode_cpt_flags::PUNCTUATION, 0xD3 },
+        { unicode_cpt_flags::ACCENT_MARK, 0xD4 },
+        { unicode_cpt_flags::SYMBOL,      0xD5 },
     };
 
     static const std::map<int, std::string> k_ucat_map = {
         { unicode_cpt_flags::NUMBER,      "\x30-\x39" }, // 0-9
         { unicode_cpt_flags::LETTER,      "\x41-\x5A\x61-\x7A" }, // A-Za-z
         { unicode_cpt_flags::PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\}
+        { unicode_cpt_flags::ACCENT_MARK, "" }, // no sub-128 codepoints
+        { unicode_cpt_flags::SYMBOL,      "\\\x24\\\x2B\x3C-\x3E\x5E\x60\\\x7C" }, // $+<=>^`|
     };
 
     // compute collapsed codepoints only if needed by at least one regex
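
Background for the two new entries above: the deepseek-v3 pre-tokenizer regex uses the Unicode categories \p{M} (combining marks) and \p{S} (symbols), which std::regex cannot match directly. unicode_regex_split therefore rewrites each supported category to a reserved single byte (0xD4 and 0xD5 in the new k_ucat_cpt rows) and matches against a "collapsed" copy of the text in which every codepoint carrying that category flag is replaced by the same byte. A toy sketch of the collapse step, with illustrative helper and parameter names and a simplified fallback for other codepoints:

    #include <cstdint>
    #include <string>
    #include <vector>

    // One byte per codepoint so a byte-oriented regex can stand in for \p{M} / \p{S}.
    static std::string collapse_for_regex(const std::vector<uint32_t> & cpts,
                                          const std::vector<bool>     & is_accent_mark,
                                          const std::vector<bool>     & is_symbol) {
        std::string out;
        out.reserve(cpts.size());
        for (size_t i = 0; i < cpts.size(); ++i) {
            if (is_accent_mark[i]) {
                out += (char) 0xD4;      // what a rewritten "\p{M}" matches
            } else if (is_symbol[i]) {
                out += (char) 0xD5;      // what a rewritten "\p{S}" matches
            } else if (cpts[i] < 0x80) {
                out += (char) cpts[i];   // ASCII passes through unchanged
            } else {
                out += (char) 0xD0;      // stand-in byte for "any other codepoint" (simplified)
            }
        }
        return out;
    }
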

From b56f079e28fda692f11a8b59200ceb815b05d419 Mon Sep 17 00:00:00 2001
From: 0cc4m 
Date: Sat, 4 Jan 2025 21:09:59 +0100
Subject: [PATCH 44/81] Vulkan: Add device-specific blacklist for coopmat for
 the AMD proprietary driver (#11074)

* Vulkan: Add device-specific blacklist for coopmat for the AMD proprietary driver

* Add (TM) to AMD name check
---
 ggml/src/ggml-vulkan/ggml-vulkan.cpp | 30 +++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index 020e61280..d75cd6d61 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -2040,6 +2040,8 @@ static void ggml_vk_load_shaders(vk_device& device) {
     std::cerr << "Done!" << std::endl;
 }
 
+static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props);
+
 static vk_device ggml_vk_get_device(size_t idx) {
     VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");
 
@@ -2175,9 +2177,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
 
         device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
 
-        if (device->vendor_id == VK_VENDOR_ID_INTEL || (device->vendor_id == VK_VENDOR_ID_AMD && (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource))) {
-            // Intel drivers don't support coopmat properly yet
-            // Only RADV supports coopmat properly on AMD
+        if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props)) {
             device->coopmat_support = false;
         }
 
@@ -2515,7 +2515,6 @@ static vk_device ggml_vk_get_device(size_t idx) {
     return vk_instance.devices[idx];
 }
 
-
 static void ggml_vk_print_gpu_info(size_t idx) {
     GGML_ASSERT(idx < vk_instance.device_indices.size());
     size_t dev_num = vk_instance.device_indices[idx];
@@ -2565,9 +2564,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
         }
     }
 
-    if (props2.properties.vendorID == VK_VENDOR_ID_INTEL || (props2.properties.vendorID == VK_VENDOR_ID_AMD && (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource))) {
-        // Intel drivers don't support coopmat properly yet
-        // Only RADV supports coopmat properly on AMD
+    if (!ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props)) {
         coopmat_support = false;
     }
 
@@ -8088,6 +8085,25 @@ static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
     UNUSED(instance_extensions);
 }
 
+static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props) {
+    switch (props.vendorID) {
+    case VK_VENDOR_ID_INTEL:
+        // Intel drivers don't support coopmat properly yet
+        return false;
+    case VK_VENDOR_ID_AMD:
+        if (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource) {
+            // Workaround for AMD proprietary driver reporting support on all GPUs
+            const std::string name = props.deviceName;
+            return name.rfind("AMD Radeon RX 7", 0) == 0   || name.rfind("AMD Radeon(TM) RX 7", 0) == 0   || // RDNA 3 consumer GPUs
+                   name.rfind("AMD Radeon PRO W7", 0) == 0 || name.rfind("AMD Radeon(TM) PRO W7", 0) == 0 || // RDNA 3 workstation GPUs
+                   name.rfind("AMD Radeon 7", 0) == 0      || name.rfind("AMD Radeon(TM) 7", 0) == 0;        // RDNA 3 APUs
+        }
+        return true;
+    default:
+        return true;
+    }
+}
+
 // checks
 
 #ifdef GGML_VULKAN_CHECK_RESULTS
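
The allow-list above matches device names by prefix with std::string::rfind(prefix, 0) == 0, which is the pre-C++20 spelling of starts_with(): limiting the search to position 0 turns rfind into a prefix test. A small helper (illustrative only, not part of the patch) makes the intent explicit:

    #include <string>

    static bool name_starts_with(const std::string & name, const char * prefix) {
        return name.rfind(prefix, 0) == 0; // only position 0 is considered, i.e. a prefix test
    }

    // e.g. name_starts_with(props.deviceName, "AMD Radeon RX 7")
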

From 46e3556e01b824e52395fb050b29804b6cff2a7c Mon Sep 17 00:00:00 2001
From: Johannes Gäßler
Date: Mon, 6 Jan 2025 02:33:52 +0100
Subject: [PATCH 45/81] CUDA: add BF16 support (#11093)

* CUDA: add BF16 support
---
 ggml/src/ggml-cuda/convert.cu     |   2 +
 ggml/src/ggml-cuda/ggml-cuda.cu   |   3 +-
 ggml/src/ggml-cuda/mmv.cu         | 114 ++++++++++++++++++++----------
 ggml/src/ggml-cuda/vendors/cuda.h |   1 +
 ggml/src/ggml-cuda/vendors/hip.h  |   3 +
 ggml/src/ggml-cuda/vendors/musa.h |   3 +
 6 files changed, 87 insertions(+), 39 deletions(-)

diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 3896f956d..5b0dfacef 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -680,6 +680,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_iq3_s_cuda;
         case GGML_TYPE_F16:
             return convert_unary_cuda<half>;
+        case GGML_TYPE_BF16:
+            return convert_unary_cuda<nv_bfloat16>;
         default:
             return nullptr;
     }
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index c180adc84..0b06be729 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -1728,7 +1728,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
 static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft);
 
-    bool use_mul_mat_vec   = src0->type == GGML_TYPE_F16
+    bool use_mul_mat_vec   = (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % 2 == 0 && src1->ne[1] == 1;
     bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
@@ -2869,6 +2869,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
                     case GGML_TYPE_IQ3_XXS:
                     case GGML_TYPE_IQ4_NL:
                     case GGML_TYPE_IQ4_XS:
+                    case GGML_TYPE_BF16:
 #ifdef GGML_USE_MUSA
                         if (a->type == GGML_TYPE_Q3_K) {
                             return false;
diff --git a/ggml/src/ggml-cuda/mmv.cu b/ggml/src/ggml-cuda/mmv.cu
index a4b4f6bc1..ac45f2d17 100644
--- a/ggml/src/ggml-cuda/mmv.cu
+++ b/ggml/src/ggml-cuda/mmv.cu
@@ -1,9 +1,9 @@
 #include "common.cuh"
 #include "mmv.cuh"
 
-template <typename type_acc, int block_size>
+template <typename T, typename type_acc, int block_size>
 static __global__ void mul_mat_vec(
-        const half * __restrict__ x, const float * __restrict__ y, float * __restrict__ dst, const int64_t ncols2, const int64_t stride_row,
+        const T * __restrict__ x, const float * __restrict__ y, float * __restrict__ dst, const int64_t ncols2, const int64_t stride_row,
         const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst) {
     const int64_t row     = blockIdx.x;
     const int64_t channel = blockIdx.z;
@@ -13,7 +13,6 @@ static __global__ void mul_mat_vec(
     y   +=  channel               *stride_channel_y;
     dst +=  channel               *stride_channel_dst;
 
-    const half2  * x2 = (const half2  *) x;
     const float2 * y2 = (const float2 *) y;
 
     extern __shared__ char data_mmv[];
@@ -28,28 +27,44 @@ static __global__ void mul_mat_vec(
 
     float sumf;
 
-    if (std::is_same<type_acc, float>::value) {
+    if constexpr (std::is_same<T, half>::value) {
+        const half2 * x2 = (const half2 *) x;
+
+        if (std::is_same<type_acc, float>::value) {
+            sumf = 0.0f;
+
+            for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
+                const float2 tmpx = __half22float2(x2[col2]);
+                const float2 tmpy = y2[col2];
+                sumf += tmpx.x * tmpy.x;
+                sumf += tmpx.y * tmpy.y;
+            }
+        } else {
+#ifdef FP16_AVAILABLE
+            half2 sumh2 = make_half2(0.0f, 0.0f);
+
+            for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
+                const float2 tmp = y2[col2];
+                sumh2 += x2[col2] * make_half2(tmp.x, tmp.y);
+            }
+
+            sumf = __low2float(sumh2) + __high2float(sumh2);
+#else
+            NO_DEVICE_CODE;
+#endif // FP16_AVAILABLE
+        }
+    } else if constexpr (std::is_same<T, nv_bfloat16>::value) {
+        const int * x2 = (const int *) x;
         sumf = 0.0f;
 
         for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-            const float2 tmpx = __half22float2(x2[col2]);
+            const int    tmpx = x2[col2];
             const float2 tmpy = y2[col2];
-            sumf += tmpx.x * tmpy.x;
-            sumf += tmpx.y * tmpy.y;
+            sumf += float(reinterpret_cast<const nv_bfloat16 *>(&tmpx)[0]) * tmpy.x;
+            sumf += float(reinterpret_cast<const nv_bfloat16 *>(&tmpx)[1]) * tmpy.y;
         }
     } else {
-#ifdef FP16_AVAILABLE
-        half2 sumh2 = make_half2(0.0f, 0.0f);
-
-        for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-            const float2 tmp = y2[col2];
-            sumh2 += x2[col2] * make_half2(tmp.x, tmp.y);
-        }
-
-        sumf = __low2float(sumh2) + __high2float(sumh2);
-#else
-        NO_DEVICE_CODE;
-#endif // FP16_AVAILABLE
+        static_assert(std::is_same<T, void>::value, "unsupported type");
     }
 
     sumf = warp_reduce_sum(sumf);
@@ -71,9 +86,9 @@ static __global__ void mul_mat_vec(
     dst[row] = sumf;
 }
 
-template <typename type_acc>
+template <typename T, typename type_acc>
 static void launch_mul_mat_vec_cuda(
-        const half * x, const float * y, float * dst,
+        const T * x, const float * y, float * dst,
         const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y,
         const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst,
         cudaStream_t stream) {
@@ -97,35 +112,35 @@ static void launch_mul_mat_vec_cuda(
     const dim3 block_dims(block_size_best, 1, 1);
     switch (block_size_best) {
         case   32: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case   64: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case   96: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  128: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  160: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  192: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  224: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  256: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         default: {
@@ -134,25 +149,25 @@ static void launch_mul_mat_vec_cuda(
     }
 }
 
+template <typename T>
 static void mul_mat_vec_cuda(
-        const half * x, const float * y, float * dst,
+        const T * x, const float * y, float * dst,
         const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y,
         const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst,
         enum ggml_prec prec, cudaStream_t stream) {
     switch (prec) {
         case GGML_PREC_DEFAULT: {
-            launch_mul_mat_vec_cuda<half>(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
+            launch_mul_mat_vec_cuda<T, half>(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
                 stride_channel_x, stride_channel_y, stride_channel_dst, stream);
         } break;
         case GGML_PREC_F32: {
-            launch_mul_mat_vec_cuda<float>(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
+            launch_mul_mat_vec_cuda<T, float>(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
                 stride_channel_x, stride_channel_y, stride_channel_dst, stream);
         } break;
     }
 }
 
 void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
-    GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
     GGML_ASSERT(dst->type  == GGML_TYPE_F32);
 
@@ -164,7 +179,6 @@ void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor *
     const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
     const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
 
-    const half  * src0_d = (const half  *) src0->data;
     const float * src1_d = (const float *) src1->data;
     float       *  dst_d = (float       *)  dst->data;
 
@@ -181,7 +195,20 @@ void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor *
     const int64_t channel_stride_y   = src1->nb[2] / ggml_type_size(src1->type);
     const int64_t channel_stride_dst =  dst->nb[2] / ggml_type_size( dst->type);
 
-    mul_mat_vec_cuda(src0_d, src1_d, dst_d, ne00, ne01, stride_row, ne02, ne12, channel_stride_x, channel_stride_y, channel_stride_dst, prec, ctx.stream());
+    switch (src0->type) {
+        case GGML_TYPE_F16: {
+            const half * src0_d = (const half *) src0->data;
+            mul_mat_vec_cuda(src0_d, src1_d, dst_d, ne00, ne01, stride_row, ne02, ne12,
+                channel_stride_x, channel_stride_y, channel_stride_dst, prec, ctx.stream());
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0->data;
+            mul_mat_vec_cuda(src0_d, src1_d, dst_d, ne00, ne01, stride_row, ne02, ne12,
+                channel_stride_x, channel_stride_y, channel_stride_dst, prec, ctx.stream());
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
 }
 
 void ggml_cuda_op_mul_mat_vec(
@@ -190,7 +217,6 @@ void ggml_cuda_op_mul_mat_vec(
     const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
     const int64_t src1_padded_row_size, cudaStream_t stream) {
 
-    GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
     GGML_ASSERT(dst->type  == GGML_TYPE_F32);
 
@@ -211,8 +237,20 @@ void ggml_cuda_op_mul_mat_vec(
     const int64_t channel_stride_y   = 0;
     const int64_t channel_stride_dst = 0;
 
-    mul_mat_vec_cuda((const half *) src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stride_row,
-        nchannels_x, nchannels_y, channel_stride_x, channel_stride_y, channel_stride_dst, prec, stream);
+    switch (src0->type) {
+        case GGML_TYPE_F16: {
+            const half * src0_d = (const half *) src0_dd_i;
+            mul_mat_vec_cuda(src0_d, src1_ddf_i, dst_dd_i, ne00, row_diff, stride_row,
+                nchannels_x, nchannels_y, channel_stride_x, channel_stride_y, channel_stride_dst, prec, stream);
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0_dd_i;
+            mul_mat_vec_cuda(src0_d, src1_ddf_i, dst_dd_i, ne00, row_diff, stride_row,
+                nchannels_x, nchannels_y, channel_stride_x, channel_stride_y, channel_stride_dst, prec, stream);
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
 
     GGML_UNUSED(ctx);
     GGML_UNUSED(src1);
diff --git a/ggml/src/ggml-cuda/vendors/cuda.h b/ggml/src/ggml-cuda/vendors/cuda.h
index db9f6a165..1746b0732 100644
--- a/ggml/src/ggml-cuda/vendors/cuda.h
+++ b/ggml/src/ggml-cuda/vendors/cuda.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #if CUDART_VERSION < 11020
diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h
index 3205534d6..c905b15d7 100644
--- a/ggml/src/ggml-cuda/vendors/hip.h
+++ b/ggml/src/ggml-cuda/vendors/hip.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #ifdef __HIP_PLATFORM_AMD__
 // for rocblas_initialize()
 #include "rocblas/rocblas.h"
@@ -121,6 +122,8 @@
     #define __has_builtin(x) 0
 #endif
 
+typedef hip_bfloat16 nv_bfloat16;
+
 typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
 typedef uint8_t uint8x4_t __attribute__((ext_vector_type(4)));
 static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
diff --git a/ggml/src/ggml-cuda/vendors/musa.h b/ggml/src/ggml-cuda/vendors/musa.h
index 1604b8229..6cc1b69ee 100644
--- a/ggml/src/ggml-cuda/vendors/musa.h
+++ b/ggml/src/ggml-cuda/vendors/musa.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #define CUBLAS_COMPUTE_16F CUDA_R_16F
 #define CUBLAS_COMPUTE_32F CUDA_R_32F
@@ -132,3 +133,5 @@
 #define cudaKernelNodeParams musaKernelNodeParams
 #define cudaStreamCaptureModeRelaxed musaStreamCaptureModeRelaxed
 #define cudaStreamEndCapture musaStreamEndCapture
+
+typedef mt_bfloat16 nv_bfloat16;
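
Background for the kernel changes above: a bfloat16 value keeps the sign bit, the 8 exponent bits and the top 7 mantissa bits of an IEEE-754 float32, i.e. exactly the upper 16 bits, so widening to float is a 16-bit shift. That is why the BF16 path can load two packed values as a single int and reinterpret the halves, and why HIP and MUSA only need the typedefs above to reuse the same templates. A host-side sketch of the conversion (standalone illustration; the device code uses nv_bfloat16 and its intrinsics instead):

    #include <cstdint>
    #include <cstring>

    static float bf16_to_f32(uint16_t bf16_bits) {
        const uint32_t f32_bits = (uint32_t) bf16_bits << 16; // the low 16 mantissa bits become zero
        float f;
        std::memcpy(&f, &f32_bits, sizeof(f));
        return f;
    }
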

From 5047dd3546951dea3d65c02257d06c46c8662338 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:52:01 +0200
Subject: [PATCH 46/81] llama : use _impl suffix instead of _internal (#11060)

ggml-ci
---
 src/llama-quant.cpp | 20 ++++++++++----------
 src/llama.cpp       | 16 ++++++++--------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 42974f8f1..104f90343 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -22,7 +22,7 @@ static void zeros(std::ofstream & file, size_t n) {
     }
 }
 
-struct quantize_state_internal {
+struct quantize_state_impl {
     const llama_model                 & model;
     const llama_model_quantize_params * params;
 
@@ -43,13 +43,13 @@ struct quantize_state_internal {
     // used to figure out if a model shares tok_embd with the output weight
     bool has_output = false;
 
-    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
+    quantize_state_impl(const llama_model & model, const llama_model_quantize_params * params)
         : model(model)
         , params(params)
         {}
 };
 
-static void llama_tensor_dequantize_internal(
+static void llama_tensor_dequantize_impl(
     struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
     const size_t nelements, const int nthread
 ) {
@@ -121,7 +121,7 @@ static void llama_tensor_dequantize_internal(
     workers.clear();
 }
 
-static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
     const std::string name = ggml_get_name(tensor);
 
     // TODO: avoid hardcoded tensor names - use the TN_* constants
@@ -410,7 +410,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
     return new_type;
 }
 
-static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
     if (nthread < 2) {
         // single-thread
         size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
@@ -464,7 +464,7 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
     return new_size;
 }
 
-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
+static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
     ggml_type default_type;
     llama_ftype ftype = params->ftype;
 
@@ -534,7 +534,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
     llm_load_hparams(ml, model);
     llm_load_stats  (ml, model);
 
-    struct quantize_state_internal qs(model, params);
+    struct quantize_state_impl qs(model, params);
 
     if (params->only_copy) {
         ftype = model.ftype;
@@ -837,7 +837,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
             } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
                 throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
             } else {
-                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+                llama_tensor_dequantize_impl(tensor, f32_conv_buf, workers, nelements, nthread);
                 f32_data = (float *) f32_conv_buf.data();
             }
 
@@ -866,7 +866,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
                 void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
                 const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
 
-                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
+                new_size += llama_tensor_quantize_impl(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
             }
             LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
         }
@@ -919,7 +919,7 @@ uint32_t llama_model_quantize(
         const char * fname_out,
         const llama_model_quantize_params * params) {
     try {
-        llama_model_quantize_internal(fname_inp, fname_out, params);
+        llama_model_quantize_impl(fname_inp, fname_out, params);
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
         return 1;
diff --git a/src/llama.cpp b/src/llama.cpp
index ea78ea487..4a6798f41 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -10717,7 +10717,7 @@ static enum ggml_status llama_graph_compute(
 // return positive int on warning
 // return negative int on error
 //
-static int llama_decode_internal(
+static int llama_decode_impl(
          llama_context & lctx,
            llama_batch   inp_batch) {
 
@@ -11052,7 +11052,7 @@ static int llama_decode_internal(
 // return positive int on warning
 // return negative int on error
 //
-static int llama_encode_internal(
+static int llama_encode_impl(
          llama_context & lctx,
            llama_batch   inp_batch) {
 
@@ -11234,7 +11234,7 @@ static int llama_encode_internal(
 }
 
 // find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
-static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
+static void llama_kv_cache_defrag_impl(struct llama_context & lctx) {
     auto & kv_self = lctx.kv_self;
 
     const auto & hparams = lctx.model.hparams;
@@ -11454,7 +11454,7 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
     //LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0);
 }
 
-static void llama_kv_cache_update_internal(struct llama_context & lctx) {
+static void llama_kv_cache_update_impl(struct llama_context & lctx) {
     bool need_reserve = false;
 
     if (lctx.kv_self.has_shift) {
@@ -11490,7 +11490,7 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
 
     // defragment the KV cache if needed
     if (lctx.kv_self.do_defrag) {
-        llama_kv_cache_defrag_internal(lctx);
+        llama_kv_cache_defrag_impl(lctx);
 
         need_reserve = true;
 
@@ -12191,7 +12191,7 @@ void llama_kv_cache_defrag(struct llama_context * ctx) {
 }
 
 void llama_kv_cache_update(struct llama_context * ctx) {
-    llama_kv_cache_update_internal(*ctx);
+    llama_kv_cache_update_impl(*ctx);
 }
 
 bool llama_kv_cache_can_shift(struct llama_context * ctx) {
@@ -12203,7 +12203,7 @@ bool llama_kv_cache_can_shift(struct llama_context * ctx) {
 int32_t llama_encode(
         struct llama_context * ctx,
           struct llama_batch   batch) {
-    const int ret = llama_encode_internal(*ctx, batch);
+    const int ret = llama_encode_impl(*ctx, batch);
     if (ret != 0) {
         LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
     }
@@ -12214,7 +12214,7 @@ int32_t llama_encode(
 int32_t llama_decode(
         struct llama_context * ctx,
           struct llama_batch   batch) {
-    const int ret = llama_decode_internal(*ctx, batch);
+    const int ret = llama_decode_impl(*ctx, batch);
     if (ret != 0) {
         LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
     }

From 727368c60f2ebf2d6a7473a4a9f80957ab063a8e Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:52:15 +0200
Subject: [PATCH 47/81] llama : use LLAMA_TOKEN_NULL (#11062)

ggml-ci
---
 common/common.cpp                             |  2 +-
 common/ngram-cache.cpp                        | 24 +++++++-------
 common/ngram-cache.h                          |  4 +--
 examples/batched/batched.cpp                  |  2 +-
 .../convert-llama2c-to-ggml.cpp               |  4 +--
 examples/main/main.cpp                        |  4 +--
 examples/server/utils.hpp                     |  2 +-
 include/llama.h                               |  1 -
 src/llama-model.cpp                           | 32 +++++++++----------
 src/llama-sampling.cpp                        |  8 ++---
 src/llama-vocab.cpp                           | 24 +++++++-------
 11 files changed, 53 insertions(+), 54 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 4bb140ee2..d6a7ab753 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -982,7 +982,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-            if (decoder_start_token_id == -1) {
+            if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
                 decoder_start_token_id = bos;
             }
             tmp.clear();
diff --git a/common/ngram-cache.cpp b/common/ngram-cache.cpp
index a9dfb6714..a057ae45f 100644
--- a/common/ngram-cache.cpp
+++ b/common/ngram-cache.cpp
@@ -65,13 +65,13 @@ constexpr int     draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
 static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) {
     common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
     if (part_static_it == nc_static.end()) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     const common_ngram_cache_part part_static = part_static_it->second;
 
     int max_count_static  = 0;
     int sum_count_static  = 0;
-    llama_token max_token = -1;
+    llama_token max_token = LLAMA_TOKEN_NULL;
 
     for (std::pair<llama_token, int> token_count_static : part_static) {
         const llama_token token = token_count_static.first;
@@ -85,10 +85,10 @@ static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram
     }
 
     if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     return max_token;
 }
@@ -98,9 +98,9 @@ static llama_token try_draft(
     common_ngram_cache & nc_primary, const std::vector<common_ngram> & ngrams_primary, common_ngram_cache_part & part_static,
     const int * min_sample_size, const int * min_percent) {
 
-    llama_token drafted_token = -1;
+    llama_token drafted_token = LLAMA_TOKEN_NULL;
 
-    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
+    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == LLAMA_TOKEN_NULL; --i) {
         const common_ngram ngram_primary = ngrams_primary[i];
 
         common_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
@@ -112,7 +112,7 @@ static llama_token try_draft(
         int max_count_primary = 0;
         int max_count_static  = 0;
         int sum_count_primary = 0;
-        llama_token max_token = -1;
+        llama_token max_token = LLAMA_TOKEN_NULL;
 
         for (std::pair<llama_token, int> token_count_primary : part_primary) {
             const llama_token token = token_count_primary.first;
@@ -154,7 +154,7 @@ void common_ngram_cache_draft(
     }
 
     while ((int) draft.size()-1 < n_draft) {
-        llama_token drafted_token = -1;
+        llama_token drafted_token = LLAMA_TOKEN_NULL;
 
         const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
         common_ngram ngram_static;
@@ -177,17 +177,17 @@ void common_ngram_cache_draft(
             }
             ngrams_cd.push_back(ngram_cd);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_static, ngram_static);
         }
 
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             break;
         }
 
diff --git a/common/ngram-cache.h b/common/ngram-cache.h
index 09c2b0319..dfe012abe 100644
--- a/common/ngram-cache.h
+++ b/common/ngram-cache.h
@@ -17,13 +17,13 @@ struct common_ngram {
 
     common_ngram() {
         for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
-            tokens[i] = -1;
+            tokens[i] = LLAMA_TOKEN_NULL;
         }
     }
 
     common_ngram(const llama_token * input, const int ngram_size) {
         for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
-            tokens[i] = i < ngram_size ? input[i] : -1;
+            tokens[i] = i < ngram_size ? input[i] : LLAMA_TOKEN_NULL;
         }
     }
 
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index e2e01f2d5..2e25b62f6 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -120,7 +120,7 @@ int main(int argc, char ** argv) {
         }
 
         llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-        if (decoder_start_token_id == -1) {
+        if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
             decoder_start_token_id = llama_token_bos(model);
         }
 
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 736035d78..9c3a0c367 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -689,8 +689,8 @@ static void save_as_llama_model(
     gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID);
     gguf_set_val_u32(ctx, KV_TOKENIZER_BOS_ID, BOS_TOKEN_ID);
     gguf_set_val_u32(ctx, KV_TOKENIZER_EOS_ID, EOS_TOKEN_ID);
-    gguf_set_val_u32(ctx, KV_TOKENIZER_SEP_ID, -1);
-    gguf_set_val_u32(ctx, KV_TOKENIZER_PAD_ID, -1);
+    gguf_set_val_u32(ctx, KV_TOKENIZER_SEP_ID, LLAMA_TOKEN_NULL);
+    gguf_set_val_u32(ctx, KV_TOKENIZER_PAD_ID, LLAMA_TOKEN_NULL);
 
     gguf_set_val_u32(ctx, KV_CONTEXT_LENGTH, model->hparams.n_ctx);
     gguf_set_val_u32(ctx, KV_EMBEDDING_LENGTH, model->hparams.n_embd);
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index b5e477f5b..aaee47e32 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -494,7 +494,7 @@ int main(int argc, char ** argv) {
         }
 
         llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-        if (decoder_start_token_id == -1) {
+        if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
             decoder_start_token_id = llama_token_bos(model);
         }
 
@@ -831,7 +831,7 @@ int main(int argc, char ** argv) {
                     // if user stop generation mid-way, we must add EOT to finish model's last response
                     if (need_insert_eot && format_chat) {
                         llama_token eot = llama_token_eot(model);
-                        embd_inp.push_back(eot == -1 ? llama_token_eos(model) : eot);
+                        embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_token_eos(model) : eot);
                         need_insert_eot = false;
                     }
 
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index dc6e6e67e..ad130d490 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -507,7 +507,7 @@ static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
 
 // format incomplete utf-8 multibyte character for output
 static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
-    std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);
+    std::string out = token == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(ctx, token);
 
     // if the size is 1 and first bit is 1, meaning it's a partial character
     //   (size > 1 meaning it's already a known token)
diff --git a/include/llama.h b/include/llama.h
index a0d5ba5dd..0f619aa19 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -34,7 +34,6 @@
 
 #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
 
-// TODO: use everywhere in the implementation
 #define LLAMA_TOKEN_NULL -1
 
 #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
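
With the TODO gone, LLAMA_TOKEN_NULL (-1) is the single "no token" sentinel used across common/, the examples and the library. A tiny hypothetical helper (not part of the patch) showing the intended idiom:

    #include "llama.h"

    static bool token_is_set(llama_token tok) {
        return tok != LLAMA_TOKEN_NULL;
    }

    // e.g.: if (!token_is_set(decoder_start_token_id)) { decoder_start_token_id = llama_token_bos(model); }
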
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 405e0528f..22596499a 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1923,24 +1923,24 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
     LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
 
     // special tokens
-    if (vocab.special_bos_id  != -1)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
-    if (vocab.special_eos_id  != -1)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
-    if (vocab.special_eot_id  != -1)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
-    if (vocab.special_eom_id  != -1)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
-    if (vocab.special_unk_id  != -1)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
-    if (vocab.special_sep_id  != -1)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
-    if (vocab.special_pad_id  != -1)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
-    if (vocab.special_cls_id  != -1)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
-    if (vocab.special_mask_id != -1)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
+    if (vocab.special_bos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
+    if (vocab.special_eos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
+    if (vocab.special_eot_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
+    if (vocab.special_eom_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
+    if (vocab.special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
+    if (vocab.special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
+    if (vocab.special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
+    if (vocab.special_cls_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
+    if (vocab.special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
 
-    if (vocab.linefeed_id != -1)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+    if (vocab.linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
 
-    if (vocab.special_fim_pre_id != -1) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
-    if (vocab.special_fim_suf_id != -1) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
-    if (vocab.special_fim_mid_id != -1) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
-    if (vocab.special_fim_pad_id != -1) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
-    if (vocab.special_fim_rep_id != -1) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
-    if (vocab.special_fim_sep_id != -1) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
+    if (vocab.special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
+    if (vocab.special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
+    if (vocab.special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
+    if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
+    if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
+    if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
 
     for (const auto & id : vocab.special_eog_ids) {
         LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 69cea2f14..ef5a576cc 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -257,7 +257,7 @@ static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k)
             for (int i = 0; i < (int)cur_p->size; ++i) {
                 const float val = cur_p->data[i].logit;
                 int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
-                ib = std::max(0, std::min(nbuckets-1, ib));
+                ib = std::max(0, std::min(nbuckets - 1, ib));
                 bucket_idx[i] = ib;
                 ++histo[ib];
             }
@@ -280,13 +280,13 @@ static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k)
             for (int i = 0; i < (int)cur_p->size; ++i) {
                 int j = bucket_idx[i];
                 if (j >= ib) {
-                    *bucket_ptrs[nbuckets-1-j]++ = cur_p->data[i];
+                    *bucket_ptrs[nbuckets - 1 - j]++ = cur_p->data[i];
                 }
             }
 
             ptr = tmp_tokens.data();
             int ndone = 0;
-            for (int j = nbuckets-1; j > ib; --j) {
+            for (int j = nbuckets - 1; j > ib; --j) {
                 std::sort(ptr, ptr + histo[j], comp);
                 ptr += histo[j];
                 ndone += histo[j];
@@ -1832,7 +1832,7 @@ static void llama_sampler_dry_apply(struct llama_sampler * smpl, llama_token_dat
                 ctx->dry_repeat_count[last - k] = std::min(n, rep_limit);
                 if (n > 0) {
                     lt = k;
-                    rt = k+n-1;
+                    rt = k + n - 1;
                 }
             } else {
                 // If k is inside the current Z-box, consider two cases.
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 3fcfcaa3f..a4c015484 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -497,7 +497,7 @@ struct llm_tokenizer_bpe_session {
 
     bool append_bos(std::vector & output) const {
         if (vocab.tokenizer_add_bos) {
-            GGML_ASSERT(vocab.special_bos_id != -1);
+            GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
             output.push_back(vocab.special_bos_id);
             return true;
         }
@@ -506,7 +506,7 @@ struct llm_tokenizer_bpe_session {
 
     bool append_eos(std::vector & output) const {
         if (vocab.tokenizer_add_eos) {
-            GGML_ASSERT(vocab.special_eos_id != -1);
+            GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
             output.push_back(vocab.special_eos_id);
             return true;
         }
@@ -1403,7 +1403,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                         if (source == 0) {
                             buffer.erase_after(buffer.before_begin());
                         } else {
-                            buffer.erase_after(std::next(buffer.begin(), (source-1)));
+                            buffer.erase_after(std::next(buffer.begin(), (source - 1)));
                         }
 
                         // repeat for the right side
@@ -1417,7 +1417,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                         if (source == 0) {
                             buffer.erase_after(buffer.before_begin());
                         } else {
-                            buffer.erase_after(std::next(buffer.begin(), (source-1)));
+                            buffer.erase_after(std::next(buffer.begin(), (source - 1)));
                         }
                         break;
                     }
@@ -1454,7 +1454,7 @@ std::vector llama_tokenize_internal(
                 bool is_prev_special = true;  // prefix with space if first token
 
                 if (add_special && vocab.tokenizer_add_bos) {
-                    GGML_ASSERT(vocab.special_bos_id != -1);
+                    GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_bos_id);
                     is_prev_special = true;
                 }
@@ -1489,7 +1489,7 @@ std::vector llama_tokenize_internal(
                 }
 
                 if (add_special && vocab.tokenizer_add_eos) {
-                    GGML_ASSERT(vocab.special_eos_id != -1);
+                    GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_eos_id);
                 }
             } break;
@@ -1522,7 +1522,7 @@ std::vector llama_tokenize_internal(
         case LLAMA_VOCAB_TYPE_WPM:
             {
                 if (add_special) {
-                    GGML_ASSERT(vocab.special_cls_id != -1);
+                    GGML_ASSERT(vocab.special_cls_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_cls_id);
                 }
 
@@ -1542,14 +1542,14 @@ std::vector llama_tokenize_internal(
                 }
 
                 if (add_special) {
-                    GGML_ASSERT(vocab.special_sep_id != -1);
+                    GGML_ASSERT(vocab.special_sep_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_sep_id);
                 }
             } break;
         case LLAMA_VOCAB_TYPE_UGM:
             {
                 if (add_special && vocab.tokenizer_add_bos) {
-                    GGML_ASSERT(vocab.special_bos_id != -1);
+                    GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_bos_id);
                 }
                 llm_tokenizer_ugm_session session(vocab);
@@ -1574,7 +1574,7 @@ std::vector llama_tokenize_internal(
                 }
 
                 if (add_special && vocab.tokenizer_add_eos) {
-                    GGML_ASSERT(vocab.special_eos_id != -1);
+                    GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_eos_id);
                 }
             } break;
@@ -1642,7 +1642,7 @@ llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, lla
 }
 
 bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token) {
-    return token != -1 && vocab.special_eog_ids.count(token) > 0;
+    return token != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(token) > 0;
 }
 
 bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token) {
@@ -1881,7 +1881,7 @@ int32_t llama_detokenize_impl(
     }
 
     if (remove_special && vocab.tokenizer_add_eos) {
-        if (n_tokens > 0 && tokens[n_tokens-1] == vocab.special_eos_id) {
+        if (n_tokens > 0 && tokens[n_tokens - 1] == vocab.special_eos_id) {
             n_tokens--;
         }
     }

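For reference, a minimal sketch of the convention this patch settles on, assuming llama.h and an already loaded model; the helper name pick_end_token is illustrative, not from the patch. Special-token getters return LLAMA_TOKEN_NULL (-1) when the model does not define the token, so callers compare against the named constant and fall back explicitly instead of testing a bare -1:

    #include "llama.h"

    static llama_token pick_end_token(const struct llama_model * model) {
        // llama_token_eot() reports LLAMA_TOKEN_NULL if the model has no EOT token
        const llama_token eot = llama_token_eot(model);
        return eot == LLAMA_TOKEN_NULL ? llama_token_eos(model) : eot;
    }
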
From ae2f606bb598b287f5fb69c9fdfc98b86598c6cc Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:52:38 +0200
Subject: [PATCH 48/81] mmap : fix fileno macro clash (#11076)

* mmap : fix fileno macro clash

ggml-ci

* cont

ggml-ci
---
 src/llama-mmap.cpp | 10 +++++++---
 src/llama-mmap.h   |  2 +-
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp
index a99326335..a8cb9439b 100644
--- a/src/llama-mmap.cpp
+++ b/src/llama-mmap.cpp
@@ -241,12 +241,16 @@ llama_file::~llama_file() = default;
 size_t llama_file::tell() const { return pimpl->tell(); }
 size_t llama_file::size() const { return pimpl->size; }
 
-int llama_file::fileno() const {
+int llama_file::file_id() const {
 #ifdef _WIN32
     return _fileno(pimpl->fp);
+#else
+#if defined(fileno)
+    return fileno(pimpl->fp);
 #else
     return ::fileno(pimpl->fp);
 #endif
+#endif
 }
 
 void llama_file::seek(size_t offset, int whence) const { pimpl->seek(offset, whence); }
@@ -265,7 +269,7 @@ struct llama_mmap::impl {
 
     impl(struct llama_file * file, size_t prefetch, bool numa) {
         size = file->size();
-        int fd = file->fileno();
+        int fd = file->file_id();
         int flags = MAP_SHARED;
         if (numa) { prefetch = 0; }
 #ifdef __linux__
@@ -357,7 +361,7 @@ struct llama_mmap::impl {
 
         size = file->size();
 
-        HANDLE hFile = (HANDLE) _get_osfhandle(file->fileno());
+        HANDLE hFile = (HANDLE) _get_osfhandle(file->file_id());
 
         HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
 
diff --git a/src/llama-mmap.h b/src/llama-mmap.h
index 6bcddee8c..1da9ecb6b 100644
--- a/src/llama-mmap.h
+++ b/src/llama-mmap.h
@@ -18,7 +18,7 @@ struct llama_file {
     size_t tell() const;
     size_t size() const;
 
-    int fileno() const;
+    int file_id() const; // fileno overload
 
     void seek(size_t offset, int whence) const;
 

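A self-contained, POSIX-only illustration of the clash, not taken from the patch (file_wrapper is a made-up type): some C libraries define fileno as a function-like macro, so a member function with that name can fail to compile, and a macro cannot be called through the :: scope operator, which is why the patch branches on defined(fileno):

    #include <cstdio>

    struct file_wrapper {
        FILE * fp;

        // int fileno() const;     // breaks where <cstdio> defines fileno as a macro
        int file_id() const {      // renaming the member sidesteps the clash
    #if defined(fileno)
            return fileno(fp);     // macro form: cannot be qualified with ::
    #else
            return ::fileno(fp);   // plain POSIX function
    #endif
        }
    };
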
From 3e6e7a6bc2c4b980a0cf0fcb5cb3b79a965b5f14 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:54:25 +0200
Subject: [PATCH 49/81] tokenize : escape the prompt (#11058)

* tokenize : escape the prompt

* tokenize : update help
---
 examples/tokenize/tokenize.cpp | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp
index c97e22724..57d9d4312 100644
--- a/examples/tokenize/tokenize.cpp
+++ b/examples/tokenize/tokenize.cpp
@@ -31,6 +31,7 @@ static void print_usage_information(const char * argv0) {
     printf("    -p PROMPT, --prompt PROMPT           read prompt from the argument.\n");
     printf("    --stdin                              read prompt from standard input.\n");
     printf("    --no-bos                             do not ever add a BOS token to the prompt, even if normally the model uses a BOS token.\n");
+    printf("    --no-escape                          do not escape input (such as \\n, \\t, etc.).\n");
     printf("    --no-parse-special                   do not parse control tokens.\n");
     printf("    --log-disable                        disable logs. Makes stderr quiet when loading the model.\n");
     printf("    --show-count                         print the total number of tokens.\n");
@@ -198,6 +199,7 @@ int main(int raw_argc, char ** raw_argv) {
     // variables where to put any arguments we see.
     bool printing_ids = false;
     bool no_bos = false;
+    bool no_escape = false;
     bool no_parse_special = false;
     bool disable_logging = false;
     bool show_token_count = false;
@@ -233,6 +235,9 @@ int main(int raw_argc, char ** raw_argv) {
         else if (arg == "--no-bos") {
             no_bos = true;
         }
+        else if (arg == "--no-escape") {
+            no_escape = true;
+        }
         else if (arg == "--no-parse-special") {
             no_parse_special = true;
         }
@@ -363,6 +368,11 @@ int main(int raw_argc, char ** raw_argv) {
     const bool model_wants_add_bos = llama_add_bos_token(model);
     const bool add_bos = model_wants_add_bos && !no_bos;
     const bool parse_special = !no_parse_special;
+    const bool escape = !no_escape;
+
+    if (escape) {
+        string_process_escapes(prompt);
+    }
 
     std::vector<llama_token> tokens;
     tokens = common_tokenize(model, prompt, add_bos, parse_special);

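As a usage sketch, assuming common.h from this repo (which provides string_process_escapes) and an illustrative helper prepare_prompt: the new default converts backslash escapes typed on the command line into real characters before tokenization, while --no-escape keeps them literal:

    #include <string>
    #include "common.h"

    static std::string prepare_prompt(std::string prompt, bool no_escape) {
        // prompt arrives from argv with literal backslash sequences, e.g. "a\\nb"
        if (!no_escape) {                    // --no-escape keeps them as typed
            string_process_escapes(prompt);  // otherwise "\\n" becomes a real newline
        }
        return prompt;
    }
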
From 47182dd03fe04a4ffda5d7f4c8a109ae0056cf56 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:55:18 +0200
Subject: [PATCH 50/81] llama : update llama_model API names (#11063)

* llama : deprecate llama_free_model, add llama_model_free

ggml-ci

* llama : change `llama_load_model_from_file` -> `llama_model_load_from_file`

ggml-ci
---
 common/common.cpp                          | 14 +++++++-------
 examples/batched-bench/batched-bench.cpp   |  4 ++--
 examples/batched/batched.cpp               |  4 ++--
 examples/gritlm/gritlm.cpp                 |  4 ++--
 examples/llama-bench/llama-bench.cpp       |  8 ++++----
 examples/llava/llava-cli.cpp               |  6 +++---
 examples/llava/minicpmv-cli.cpp            |  4 ++--
 examples/llava/qwen2vl-cli.cpp             |  6 +++---
 examples/passkey/passkey.cpp               |  4 ++--
 examples/quantize-stats/quantize-stats.cpp |  8 ++++----
 examples/run/run.cpp                       |  2 +-
 examples/simple-chat/simple-chat.cpp       |  4 ++--
 examples/simple/simple.cpp                 |  4 ++--
 examples/tokenize/tokenize.cpp             |  4 ++--
 include/llama-cpp.h                        |  2 +-
 include/llama.h                            | 13 ++++++++++---
 src/llama-model.cpp                        |  4 ++++
 src/llama.cpp                              | 16 +++++++++++-----
 tests/test-autorelease.cpp                 |  4 ++--
 tests/test-model-load-cancel.cpp           |  2 +-
 tests/test-tokenizer-0.cpp                 |  6 +++---
 tests/test-tokenizer-1-bpe.cpp             |  6 +++---
 tests/test-tokenizer-1-spm.cpp             |  6 +++---
 23 files changed, 76 insertions(+), 59 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index d6a7ab753..4fd36105e 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -846,7 +846,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     } else if (!params.model_url.empty()) {
         model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
     } else {
-        model = llama_load_model_from_file(params.model.c_str(), mparams);
+        model = llama_model_load_from_file(params.model.c_str(), mparams);
     }
 
     if (model == NULL) {
@@ -873,7 +873,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         }
 
         if (!ok) {
-            llama_free_model(model);
+            llama_model_free(model);
 
             return iparams;
         }
@@ -884,7 +884,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     llama_context * lctx = llama_new_context_with_model(model, cparams);
     if (lctx == NULL) {
         LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.c_str());
-        llama_free_model(model);
+        llama_model_free(model);
         return iparams;
     }
 
@@ -900,7 +900,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         const auto cvec = common_control_vector_load(params.control_vectors);
         if (cvec.n_embd == -1) {
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
 
             return iparams;
         }
@@ -913,7 +913,7 @@ struct common_init_result common_init_from_params(common_params & params) {
                                              params.control_vector_layer_end);
         if (err) {
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
 
             return iparams;
         }
@@ -926,7 +926,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (lora == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
             return iparams;
         }
 
@@ -1411,7 +1411,7 @@ struct llama_model * common_load_model_from_url(
         }
     }
 
-    return llama_load_model_from_file(local_path.c_str(), params);
+    return llama_model_load_from_file(local_path.c_str(), params);
 }
 
 struct llama_model * common_load_model_from_hf(
diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp
index a3b21ad6b..dd75ff9f1 100644
--- a/examples/batched-bench/batched-bench.cpp
+++ b/examples/batched-bench/batched-bench.cpp
@@ -38,7 +38,7 @@ int main(int argc, char ** argv) {
 
     llama_model_params model_params = common_model_params_to_llama(params);
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
@@ -194,7 +194,7 @@ int main(int argc, char ** argv) {
     llama_batch_free(batch);
 
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     llama_backend_free();
 
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 2e25b62f6..d34b03099 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -41,7 +41,7 @@ int main(int argc, char ** argv) {
 
     llama_model_params model_params = common_model_params_to_llama(params);
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
         LOG_ERR("%s: error: unable to load model\n" , __func__);
@@ -236,7 +236,7 @@ int main(int argc, char ** argv) {
 
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     llama_backend_free();
 
diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp
index 18a945b33..4d2db5624 100644
--- a/examples/gritlm/gritlm.cpp
+++ b/examples/gritlm/gritlm.cpp
@@ -165,7 +165,7 @@ int main(int argc, char * argv[]) {
 
     llama_backend_init();
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), mparams);
 
     // create generation context
     llama_context * ctx = llama_new_context_with_model(model, cparams);
@@ -219,7 +219,7 @@ int main(int argc, char * argv[]) {
 
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
     llama_backend_free();
 
     return 0;
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 2338ad106..2a0916766 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -1526,10 +1526,10 @@ int main(int argc, char ** argv) {
         // keep the same model between tests when possible
         if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) {
             if (lmodel) {
-                llama_free_model(lmodel);
+                llama_model_free(lmodel);
             }
 
-            lmodel = llama_load_model_from_file(inst.model.c_str(), inst.to_llama_mparams());
+            lmodel = llama_model_load_from_file(inst.model.c_str(), inst.to_llama_mparams());
             if (lmodel == NULL) {
                 fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, inst.model.c_str());
                 return 1;
@@ -1540,7 +1540,7 @@ int main(int argc, char ** argv) {
         llama_context * ctx = llama_new_context_with_model(lmodel, inst.to_llama_cparams());
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, inst.model.c_str());
-            llama_free_model(lmodel);
+            llama_model_free(lmodel);
             return 1;
         }
 
@@ -1626,7 +1626,7 @@ int main(int argc, char ** argv) {
         ggml_threadpool_free_fn(threadpool);
     }
 
-    llama_free_model(lmodel);
+    llama_model_free(lmodel);
 
     if (p) {
         p->print_footer();
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index 2691c6e6b..27215a42e 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -221,7 +221,7 @@ static struct llama_model * llava_init(common_params * params) {
 
     llama_model_params model_params = common_model_params_to_llama(*params);
 
-    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return NULL;
@@ -265,7 +265,7 @@ static void llava_free(struct llava_context * ctx_llava) {
     }
 
     llama_free(ctx_llava->ctx_llama);
-    llama_free_model(ctx_llava->model);
+    llama_model_free(ctx_llava->model);
     llama_backend_free();
 }
 
@@ -323,7 +323,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp
index e9cbb51ed..2342bdd09 100644
--- a/examples/llava/minicpmv-cli.cpp
+++ b/examples/llava/minicpmv-cli.cpp
@@ -31,7 +31,7 @@ static struct llama_model * llava_init(common_params * params) {
 
     llama_model_params model_params = common_model_params_to_llama(*params);
 
-    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return NULL;
@@ -75,7 +75,7 @@ static void llava_free(struct llava_context * ctx_llava) {
     }
 
     llama_free(ctx_llava->ctx_llama);
-    llama_free_model(ctx_llava->model);
+    llama_model_free(ctx_llava->model);
     llama_backend_free();
 }
 
diff --git a/examples/llava/qwen2vl-cli.cpp b/examples/llava/qwen2vl-cli.cpp
index e86a60280..f3e5d66e2 100644
--- a/examples/llava/qwen2vl-cli.cpp
+++ b/examples/llava/qwen2vl-cli.cpp
@@ -310,7 +310,7 @@ static struct llama_model * llava_init(common_params * params) {
 
     llama_model_params model_params = common_model_params_to_llama(*params);
 
-    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return NULL;
@@ -354,7 +354,7 @@ static void llava_free(struct llava_context * ctx_llava) {
     }
 
     llama_free(ctx_llava->ctx_llama);
-    llama_free_model(ctx_llava->model);
+    llama_model_free(ctx_llava->model);
     llama_backend_free();
 }
 
@@ -575,7 +575,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp
index 09bba708f..ea91f376c 100644
--- a/examples/passkey/passkey.cpp
+++ b/examples/passkey/passkey.cpp
@@ -63,7 +63,7 @@ int main(int argc, char ** argv) {
 
     llama_model_params model_params = common_model_params_to_llama(params);
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
@@ -266,7 +266,7 @@ int main(int argc, char ** argv) {
     llama_batch_free(batch);
 
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     llama_backend_free();
 
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index ab91d0b40..9bfbb8862 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -309,7 +309,7 @@ int main(int argc, char ** argv) {
         auto mparams = llama_model_default_params();
         mparams.use_mlock  = false;
 
-        model = llama_load_model_from_file(params.model.c_str(), mparams);
+        model = llama_model_load_from_file(params.model.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
@@ -323,7 +323,7 @@ int main(int argc, char ** argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -347,7 +347,7 @@ int main(int argc, char ** argv) {
             fprintf(stderr, "%s: error: Quantization should be tested with a float model, "
                 "this model contains already quantized layers (%s is type %d)\n", __func__, kv_tensor.first.c_str(), kv_tensor.second->type);
             llama_free(ctx);
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
         included_layers++;
@@ -409,7 +409,7 @@ int main(int argc, char ** argv) {
 
 
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
     // report timing
     {
         const int64_t t_main_end_us = ggml_time_us();
diff --git a/examples/run/run.cpp b/examples/run/run.cpp
index 75b817272..c52a7961f 100644
--- a/examples/run/run.cpp
+++ b/examples/run/run.cpp
@@ -664,7 +664,7 @@ class LlamaData {
             "\r%*s"
             "\rLoading model",
             get_terminal_width(), " ");
-        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), opt.model_params));
+        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
         }
diff --git a/examples/simple-chat/simple-chat.cpp b/examples/simple-chat/simple-chat.cpp
index 7f4da666b..d72f5bcdd 100644
--- a/examples/simple-chat/simple-chat.cpp
+++ b/examples/simple-chat/simple-chat.cpp
@@ -69,7 +69,7 @@ int main(int argc, char ** argv) {
     llama_model_params model_params = llama_model_default_params();
     model_params.n_gpu_layers = ngl;
 
-    llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params);
     if (!model) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
         return 1;
@@ -194,7 +194,7 @@ int main(int argc, char ** argv) {
     }
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index 3288c0250..f69117890 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -83,7 +83,7 @@ int main(int argc, char ** argv) {
     llama_model_params model_params = llama_model_default_params();
     model_params.n_gpu_layers = ngl;
 
-    llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params);
 
     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
@@ -199,7 +199,7 @@ int main(int argc, char ** argv) {
 
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp
index 57d9d4312..684ca054a 100644
--- a/examples/tokenize/tokenize.cpp
+++ b/examples/tokenize/tokenize.cpp
@@ -338,7 +338,7 @@ int main(int raw_argc, char ** raw_argv) {
 
     llama_model_params model_params = llama_model_default_params();
     model_params.vocab_only = true;
-    llama_model * model = llama_load_model_from_file(model_path, model_params);
+    llama_model * model = llama_model_load_from_file(model_path, model_params);
     if (!model) {
         fprintf(stderr, "Error: could not load model from file '%s'.\n", model_path);
         return 1;
@@ -408,7 +408,7 @@ int main(int raw_argc, char ** raw_argv) {
     }
     // silence valgrind
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/include/llama-cpp.h b/include/llama-cpp.h
index 1500cb2fc..11306b17f 100644
--- a/include/llama-cpp.h
+++ b/include/llama-cpp.h
@@ -9,7 +9,7 @@
 #include "llama.h"
 
 struct llama_model_deleter {
-    void operator()(llama_model * model) { llama_free_model(model); }
+    void operator()(llama_model * model) { llama_model_free(model); }
 };
 
 struct llama_context_deleter {
diff --git a/include/llama.h b/include/llama.h
index 0f619aa19..0295a51fb 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -413,12 +413,19 @@ extern "C" {
     // Call once at the end of the program - currently only used for MPI
     LLAMA_API void llama_backend_free(void);
 
-    LLAMA_API struct llama_model * llama_load_model_from_file(
+    DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
+                             const char * path_model,
+              struct llama_model_params   params),
+            "use llama_model_load_from_file instead");
+
+    LLAMA_API struct llama_model * llama_model_load_from_file(
                              const char * path_model,
               struct llama_model_params   params);
 
-    // TODO: rename to llama_model_free
-    LLAMA_API void llama_free_model(struct llama_model * model);
+    DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
+            "use llama_model_free instead");
+
+    LLAMA_API void llama_model_free(struct llama_model * model);
 
     // TODO: rename to llama_init_from_model
     LLAMA_API struct llama_context * llama_new_context_with_model(
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 22596499a..7deb3683b 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -2009,6 +2009,10 @@ struct llama_model_params llama_model_default_params() {
 }
 
 void llama_free_model(struct llama_model * model) {
+    llama_model_free(model);
+}
+
+void llama_model_free(struct llama_model * model) {
     delete model;
 }
 
diff --git a/src/llama.cpp b/src/llama.cpp
index 4a6798f41..7337c34ce 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11656,6 +11656,12 @@ int64_t llama_time_us(void) {
 struct llama_model * llama_load_model_from_file(
         const char * path_model,
         struct llama_model_params params) {
+    return llama_model_load_from_file(path_model, params);
+}
+
+struct llama_model * llama_model_load_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
     ggml_time_init();
 
     llama_model * model = new llama_model;
@@ -11694,7 +11700,7 @@ struct llama_model * llama_load_model_from_file(
         ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
         if (!rpc_reg) {
             LLAMA_LOG_ERROR("%s: failed to find RPC backend\n", __func__);
-            llama_free_model(model);
+            llama_model_free(model);
             return nullptr;
         }
 
@@ -11702,7 +11708,7 @@ struct llama_model * llama_load_model_from_file(
         ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
         if (!ggml_backend_rpc_add_device_fn) {
             LLAMA_LOG_ERROR("%s: failed to find RPC device add function\n", __func__);
-            llama_free_model(model);
+            llama_model_free(model);
             return nullptr;
         }
 
@@ -11712,7 +11718,7 @@ struct llama_model * llama_load_model_from_file(
                 model->devices.push_back(dev);
             } else {
                 LLAMA_LOG_ERROR("%s: failed to add RPC device for server '%s'\n", __func__, server.c_str());
-                llama_free_model(model);
+                llama_model_free(model);
                 return nullptr;
             }
         }
@@ -11744,7 +11750,7 @@ struct llama_model * llama_load_model_from_file(
     if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
         if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
             LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
-            llama_free_model(model);
+            llama_model_free(model);
             return nullptr;
         }
         ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
@@ -11767,7 +11773,7 @@ struct llama_model * llama_load_model_from_file(
             LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
         }
 
-        llama_free_model(model);
+        llama_model_free(model);
         return nullptr;
     }
 
diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp
index 57fa00011..ba084a91a 100644
--- a/tests/test-autorelease.cpp
+++ b/tests/test-autorelease.cpp
@@ -13,10 +13,10 @@ int main(int argc, char ** argv) {
 
     std::thread([&model_path]() {
         llama_backend_init();
-        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
+        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
         auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
         llama_free(ctx);
-        llama_free_model(model);
+        llama_model_free(model);
         llama_backend_free();
     }).join();
 
diff --git a/tests/test-model-load-cancel.cpp b/tests/test-model-load-cancel.cpp
index 858535c3c..9095826fa 100644
--- a/tests/test-model-load-cancel.cpp
+++ b/tests/test-model-load-cancel.cpp
@@ -21,7 +21,7 @@ int main(int argc, char *argv[] ) {
         (void) ctx;
         return progress > 0.50;
     };
-    auto * model = llama_load_model_from_file(model_path, params);
+    auto * model = llama_model_load_from_file(model_path, params);
     llama_backend_free();
     return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
 }
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 0af85f002..121c2c60c 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -152,7 +152,7 @@ int main(int argc, char **argv) {
 
         mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), mparams);
+        model = llama_model_load_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -165,7 +165,7 @@ int main(int argc, char **argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -300,7 +300,7 @@ int main(int argc, char **argv) {
         fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
     llama_free(ctx);
 
     llama_backend_free();
diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp
index 0ff7fc833..5718fab04 100644
--- a/tests/test-tokenizer-1-bpe.cpp
+++ b/tests/test-tokenizer-1-bpe.cpp
@@ -46,7 +46,7 @@ int main(int argc, char **argv) {
 
         mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), mparams);
+        model = llama_model_load_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -59,7 +59,7 @@ int main(int argc, char **argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -143,7 +143,7 @@ int main(int argc, char **argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
     llama_free(ctx);
 
     llama_backend_free();
diff --git a/tests/test-tokenizer-1-spm.cpp b/tests/test-tokenizer-1-spm.cpp
index 9b0716a43..ac05387c9 100644
--- a/tests/test-tokenizer-1-spm.cpp
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -34,7 +34,7 @@ int main(int argc, char ** argv) {
 
         mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), mparams);
+        model = llama_model_load_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -47,7 +47,7 @@ int main(int argc, char ** argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -113,7 +113,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
     llama_free(ctx);
 
     llama_backend_free();

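A minimal end-to-end sketch of the renamed API, assuming llama.h and a hypothetical model.gguf path; the call sequence mirrors the updated tests above:

    #include "llama.h"

    int main() {
        llama_backend_init();

        // was llama_load_model_from_file
        llama_model * model = llama_model_load_from_file("model.gguf", llama_model_default_params());
        if (model == NULL) {
            llama_backend_free();
            return 1;
        }

        llama_context * ctx = llama_new_context_with_model(model, llama_context_default_params());

        llama_free(ctx);
        llama_model_free(model);   // was llama_free_model
        llama_backend_free();
        return 0;
    }
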
From 6369f867a410416239d9f20ec27c2b1d6a9fee52 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius 
Date: Mon, 6 Jan 2025 10:28:17 +0100
Subject: [PATCH 51/81] llama : rename missed batch params/vars to ubatch
 (#10059)

This commit renames the `batch` parameter to `ubatch` in the
`llama_kv_cache_find_slot`, `llm_build_inp_embd`, and
`llm_build_mamba` functions.

The motivation for this is that this should have been done as part of
Commit 19d900a7565b8f6b0a708836a57d26966cb9efe2 ("llama : rename batch
to ubatch (#9950)") but for some reason I missed these functions in
that commit and only noticed them now (sorry).
---
 src/llama-kv-cache.cpp | 32 ++++++++++++++++----------------
 src/llama.cpp          | 18 +++++++++---------
 2 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp
index 53379253a..90b6c56ed 100644
--- a/src/llama-kv-cache.cpp
+++ b/src/llama-kv-cache.cpp
@@ -119,10 +119,10 @@ bool llama_kv_cache_init(
 
 struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
            struct llama_kv_cache & cache,
-       const struct llama_ubatch & batch) {
-    const uint32_t n_tokens = batch.n_tokens;
-    const uint32_t n_seqs   = batch.n_seqs;
-    const uint32_t n_seq_tokens = batch.n_seq_tokens;
+       const struct llama_ubatch & ubatch) {
+    const uint32_t n_tokens = ubatch.n_tokens;
+    const uint32_t n_seqs   = ubatch.n_seqs;
+    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
 
     if (cache.recurrent) {
         // For recurrent state architectures (like Mamba or RWKV),
@@ -130,16 +130,16 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
         // A slot should be always be contiguous.
 
         // can only process batches with an equal number of new tokens in each sequence
-        GGML_ASSERT(batch.equal_seqs);
+        GGML_ASSERT(ubatch.equal_seqs);
 
         int32_t min = cache.size - 1;
         int32_t max = 0;
 
         // everything should fit if all seq_ids are smaller than the max
         for (uint32_t s = 0; s < n_seqs; ++s) {
-            const uint32_t n_seq_id = batch.n_seq_id[s];
+            const uint32_t n_seq_id = ubatch.n_seq_id[s];
             for (uint32_t j = 0; j < n_seq_id; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
+                const llama_seq_id seq_id = ubatch.seq_id[s][j];
 
                 if (seq_id < 0 || (uint32_t) seq_id >= cache.size) {
                     // too big seq_id
@@ -198,7 +198,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
 
         // find usable cell range
         for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = batch.seq_id[s][0];
+            const llama_seq_id seq_id = ubatch.seq_id[s][0];
             llama_kv_cell & seq_meta = cache.cells[seq_id];
             bool has_cell = false;
             if (seq_meta.tail >= 0) {
@@ -237,7 +237,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
         // gather and re-order
         for (uint32_t s = 0; s < n_seqs; ++s) {
             int32_t dst_id = s + min;
-            int32_t src_id = cache.cells[batch.seq_id[s][0]].tail;
+            int32_t src_id = cache.cells[ubatch.seq_id[s][0]].tail;
             if (dst_id != src_id) {
                 llama_kv_cell & dst_cell = cache.cells[dst_id];
                 llama_kv_cell & src_cell = cache.cells[src_id];
@@ -258,7 +258,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
 
         // update the pos of the used seqs
         for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1];
+            const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
             int32_t cell_id = s + min;
             llama_kv_cell & cell = cache.cells[cell_id];
 
@@ -266,12 +266,12 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
                 // What should happen when the pos backtracks or skips a value?
                 // Clearing the state mid-batch would require special-casing which isn't done.
                 LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
-                    __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens);
+                    __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
             }
             cell.pos = last_pos;
             cell.seq_id.clear();
-            for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
+            for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
+                const llama_seq_id seq_id = ubatch.seq_id[s][j];
                 cell.seq_id.insert(seq_id);
                 cache.cells[seq_id].tail = cell_id;
             }
@@ -325,10 +325,10 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
     for (uint32_t s = 0; s < n_seqs; s++) {
         for (uint32_t i = 0; i < n_seq_tokens; ++i) {
             uint32_t k = s*n_seq_tokens + i;
-            cache.cells[cache.head + k].pos = batch.pos[k];
+            cache.cells[cache.head + k].pos = ubatch.pos[k];
 
-            for (int32_t j = 0; j < batch.n_seq_id[s]; j++) {
-                cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]);
+            for (int32_t j = 0; j < ubatch.n_seq_id[s]; j++) {
+                cache.cells[cache.head + k].seq_id.insert(ubatch.seq_id[s][j]);
             }
         }
     }
diff --git a/src/llama.cpp b/src/llama.cpp
index 7337c34ce..60728e5bb 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2540,21 +2540,21 @@ static struct ggml_tensor * llm_build_inp_embd(
         struct ggml_context * ctx,
        struct llama_context & lctx,
         const llama_hparams & hparams,
-         const llama_ubatch & batch,
+         const llama_ubatch & ubatch,
          struct ggml_tensor * tok_embd,
          const llm_build_cb & cb) {
     const int64_t n_embd = hparams.n_embd;
 
     struct ggml_tensor * inpL;
 
-    if (batch.token) {
-        lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
+    if (ubatch.token) {
+        lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ubatch.n_tokens);
         cb(lctx.inp_tokens, "inp_tokens", -1);
         ggml_set_input(lctx.inp_tokens);
 
         inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
     } else {
-        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
+        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
         inpL = lctx.inp_embd;
         ggml_set_input(lctx.inp_embd);
     }
@@ -3149,7 +3149,7 @@ static struct ggml_tensor * llm_build_copy_mask_state(
 static struct ggml_tensor * llm_build_mamba(
         struct ggml_context * ctx,
        struct llama_context & lctx,
-         const llama_ubatch & batch,
+         const llama_ubatch & ubatch,
          struct ggml_cgraph * graph,
          struct ggml_tensor * cur,
          struct ggml_tensor * state_copy,
@@ -3165,17 +3165,17 @@ static struct ggml_tensor * llm_build_mamba(
     const int64_t d_inner = hparams.ssm_d_inner;
     const int64_t d_state = hparams.ssm_d_state;
     const int64_t dt_rank = hparams.ssm_dt_rank;
-    const int64_t n_seqs  = batch.n_seqs;
+    const int64_t n_seqs  = ubatch.n_seqs;
     // Some variants of Mamba arch (e.g. FalconMamba do apply layer norm on B and Dt layers)
     const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms;
     // Use the same RMS norm as the final layer norm
     const float norm_rms_eps = hparams.f_norm_rms_eps;
 
-    const int64_t n_seq_tokens = batch.n_seq_tokens;
+    const int64_t n_seq_tokens = ubatch.n_seq_tokens;
 
     GGML_ASSERT(n_seqs != 0);
-    GGML_ASSERT(batch.equal_seqs);
-    GGML_ASSERT(batch.n_tokens == n_seq_tokens * n_seqs);
+    GGML_ASSERT(ubatch.equal_seqs);
+    GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
 
     struct ggml_tensor * conv_states_all = kv.k_l[il];
     struct ggml_tensor * ssm_states_all  = kv.v_l[il];

From 96a1dc27c3f09bf1ed83a26292d571795bcf27fa Mon Sep 17 00:00:00 2001
From: Asghar Ghorbani 
Date: Mon, 6 Jan 2025 12:21:46 +0100
Subject: [PATCH 52/81] llama : prevent system info string accumulation across
 calls (#11101)

---
 src/llama.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/llama.cpp b/src/llama.cpp
index 60728e5bb..c162c31a6 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -12458,6 +12458,8 @@ int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int
 
 const char * llama_print_system_info(void) {
     static std::string s;
+    s.clear(); // Clear the string, since it's static, otherwise it will accumulate data from previous calls.
+
 
     for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
         auto * reg = ggml_backend_reg_get(i);

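A self-contained illustration of the bug class, not llama.cpp code (system_info here is a stand-in): a function-local static std::string keeps its contents between calls, so appending without clearing first duplicates the report on every call:

    #include <string>

    static const char * system_info() {
        static std::string s;
        s.clear();                  // without this, a second call returns the text twice
        s += "CPU : AVX2 = 1 | ";
        return s.c_str();
    }
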
From 09186fabbe05236f2b9446ba6c643cb737540d10 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen 
Date: Mon, 6 Jan 2025 13:41:12 +0100
Subject: [PATCH 53/81] llama : remove check flash_attn with lora (#11104)

---
 src/llama.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index c162c31a6..ebd6e3b29 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
 

From e6e7c75d94adf4d39e846d30807c531ff22865e7 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 15:36:08 +0200
Subject: [PATCH 54/81] server : fix extra BOS in infill endpoint (#11106)

* server : fix extra BOS in infill endpoint

ggml-ci

* server : update infill tests
---
 examples/server/server.cpp                | 2 +-
 examples/server/tests/unit/test_infill.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c2e62ba69..127323e77 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -3797,7 +3797,7 @@ int main(int argc, char ** argv) {
         data["input_extra"] = input_extra; // default to empty array if it's not exist
 
         std::string prompt = json_value(data, "prompt", std::string());
-        std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, true, true);
+        std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, false, true);
         SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
         data["prompt"] = format_infill(
             ctx_server.ctx,
diff --git a/examples/server/tests/unit/test_infill.py b/examples/server/tests/unit/test_infill.py
index ad4b8192a..10554db0f 100644
--- a/examples/server/tests/unit/test_infill.py
+++ b/examples/server/tests/unit/test_infill.py
@@ -18,7 +18,7 @@ def test_infill_without_input_extra():
         "input_suffix": "}\n",
     })
     assert res.status_code == 200
-    assert match_regex("(Ann|small|shiny)+", res.body["content"])
+    assert match_regex("(Ann|small|shiny|Daddy)+", res.body["content"])
 
 
 def test_infill_with_input_extra():

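The essence of the fix as a sketch, assuming common.h; tokenize_infill_prompt is an illustrative wrapper, not server code. The raw infill prompt has to be tokenized without special tokens because format_infill assembles the FIM structure itself, and add_special = true was sneaking an extra BOS into it:

    #include <string>
    #include <vector>
    #include "common.h"

    static std::vector<llama_token> tokenize_infill_prompt(llama_context * ctx, const std::string & prompt) {
        const bool add_special   = false;  // the fix: was true, which prepended a BOS
        const bool parse_special = true;
        return common_tokenize(ctx, prompt, add_special, parse_special);
    }
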
From 96be8c32649378a23031630a48c440f3a5d0839b Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen 
Date: Mon, 6 Jan 2025 16:34:49 +0100
Subject: [PATCH 55/81] github : add cmd line field to bug report (#11090)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* github : cmd line to bug report

* codeowners : (@ngxson) only watch dockerfile

* Apply suggestions from code review [no ci]

Co-authored-by: Johannes Gäßler 

* rm cmd in log output [no ci]

* rm 2 [no ci]

* no need backticks [no ci]

---------

Co-authored-by: Johannes Gäßler 
---
 .github/ISSUE_TEMPLATE/010-bug-compilation.yml | 12 +++++++++++-
 .github/ISSUE_TEMPLATE/019-bug-misc.yml        | 12 +++++++++++-
 CODEOWNERS                                     |  2 +-
 3 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/010-bug-compilation.yml b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
index f10b3a2b2..b85bf5741 100644
--- a/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
+++ b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
@@ -65,12 +65,22 @@ body:
         If possible, please do a git bisect and identify the exact commit that introduced the bug.
     validations:
       required: false
+  - type: textarea
+    id: command
+    attributes:
+      label: Compile command
+      description: >
+        Please provide the exact command you used to compile llama.cpp. For example: `cmake -B ...`.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
       label: Relevant log output
       description: >
-          Please copy and paste any relevant log output, including the command that you entered and any generated text.
+          Please copy and paste any relevant log output, including any generated text.
           This will be automatically formatted into code, so no need for backticks.
       render: shell
     validations:
diff --git a/.github/ISSUE_TEMPLATE/019-bug-misc.yml b/.github/ISSUE_TEMPLATE/019-bug-misc.yml
index d157ea307..1904e31fd 100644
--- a/.github/ISSUE_TEMPLATE/019-bug-misc.yml
+++ b/.github/ISSUE_TEMPLATE/019-bug-misc.yml
@@ -52,6 +52,16 @@ body:
         - Other (Please specify in the next section)
     validations:
       required: false
+  - type: textarea
+    id: command
+    attributes:
+      label: Command line
+      description: >
+        Please provide the exact commands you entered, if applicable. For example: `llama-server -m ... -c ...`, `llama-cli -m ...`, etc.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: false
   - type: textarea
     id: info
     attributes:
@@ -74,7 +84,7 @@ body:
     attributes:
       label: Relevant log output
       description: >
-          If applicable, please copy and paste any relevant log output, including the command that you entered and any generated text.
+          If applicable, please copy and paste any relevant log output, including any generated text.
           This will be automatically formatted into code, so no need for backticks.
       render: shell
     validations:
diff --git a/CODEOWNERS b/CODEOWNERS
index adeba5395..c9fa34761 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,5 +1,5 @@
 # collaborators can optionally add themselves here to indicate their availability for reviewing related PRs
 
 /ci/ @ggerganov
-/.devops/ @ngxson
+/.devops/*.Dockerfile @ngxson
 /examples/server/ @ngxson

From ecebbd292d741ac084cf248146b2cfb17002aa1d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 17:52:35 +0200
Subject: [PATCH 56/81] llama : remove unused headers (#11109)

ggml-ci
---
 src/llama.cpp | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index ebd6e3b29..8ea6686c9 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8,7 +8,6 @@
 #include "llama-kv-cache.h"
 #include "llama-model-loader.h"
 #include "llama-model.h"
-#include "llama-quant.h"
 
 #include "ggml.h"
 #include "ggml-alloc.h"
@@ -18,12 +17,8 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
-#include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -31,10 +26,7 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
-#include 
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -12434,16 +12426,16 @@ int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix,
     return 0;
 }
 
-int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int split_no, int split_count) {
+int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count) {
     std::string str_split_path(split_path);
     char postfix[32];
     snprintf(postfix, 32, "-%05d-of-%05d.gguf", split_no + 1, split_count);
     std::string str_postfix(postfix);
 
-    // check if dest ends with postfix
+    // check if split_prefix ends with postfix
     int size_prefix = str_split_path.size() - str_postfix.size();
     if (size_prefix > 0 && str_split_path.find(str_postfix, size_prefix) != std::string::npos) {
-        snprintf(dest, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
+        snprintf(split_prefix, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
         return size_prefix;
     }
 

From dc7cef9f373f2a24b851f0df7a618c5209e593fa Mon Sep 17 00:00:00 2001
From: Eric Curtin 
Date: Mon, 6 Jan 2025 22:45:28 +0000
Subject: [PATCH 57/81] llama-run : fix context size (#11094)

Set `n_ctx` equal to `n_batch` in the `Opt` class. The context size is now
a more reasonable 2048.

Signed-off-by: Eric Curtin 
---
 examples/run/run.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/run/run.cpp b/examples/run/run.cpp
index c52a7961f..2888fcfed 100644
--- a/examples/run/run.cpp
+++ b/examples/run/run.cpp
@@ -83,6 +83,7 @@ class Opt {
         }
 
         ctx_params.n_batch        = context_size >= 0 ? context_size : context_size_default;
+        ctx_params.n_ctx          = ctx_params.n_batch;
         model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
         temperature               = temperature >= 0 ? temperature : temperature_default;
 

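For illustration only (not part of the patch): a minimal sketch of the idea behind the fix,
expressed against the public llama.cpp API. The helper name is hypothetical and assumes the
default context/batch size of 2048 mentioned above.

    #include "llama.h"

    // keep the context size in sync with the logical batch size, as the fix does
    static llama_context_params make_ctx_params(uint32_t context_size /* e.g. 2048 */) {
        llama_context_params cparams = llama_context_default_params();
        cparams.n_batch = context_size;      // tokens processed per batch
        cparams.n_ctx   = cparams.n_batch;   // context window follows the batch size
        return cparams;
    }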
From c0d6f790d07aa78be15584ec394ac20739ade93b Mon Sep 17 00:00:00 2001
From: Akarshan Biswas 
Date: Tue, 7 Jan 2025 11:56:07 +0530
Subject: [PATCH 58/81] SYCL: Use get_multi_ptr instead of deprecated
 get_pointer in wkv6 (#11087)

* SYCL: Use get_multi_ptr instead of deprecated get_pointer in wkv6

* Revert "SYCL: Use get_multi_ptr instead of deprecated get_pointer in wkv6"

This reverts commit f62dc45f318e48d375e7734b34cbddee81deed52.

* Reland: Use get_multi_ptr instead of deprecated get_pointer in wkv6
---
 ggml/src/ggml-sycl/wkv6.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-sycl/wkv6.cpp b/ggml/src/ggml-sycl/wkv6.cpp
index 75ddfb86a..105db6f03 100644
--- a/ggml/src/ggml-sycl/wkv6.cpp
+++ b/ggml/src/ggml-sycl/wkv6.cpp
@@ -131,7 +131,7 @@ void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, const ggml_tensor* s
             [=](sycl::nd_item<3> item_ct1) {
                 rwkv_wkv_f32_kernel(
                     B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d,
-                    item_ct1, shared_mem_acc.get_pointer()
+                    item_ct1, (float*)shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get()
                 );
             });
     });

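For illustration only (not part of the patch): a minimal SYCL 2020 sketch of the same migration,
replacing the deprecated accessor get_pointer() with get_multi_ptr(). The kernel and names here
are hypothetical; it assumes a DPC++-style compiler and that n fits in a single work-group.

    #include <sycl/sycl.hpp>

    // zero a work-group-local buffer; get_multi_ptr() replaces the deprecated
    // get_pointer() for obtaining a raw pointer from a local accessor
    static void zero_shared(sycl::queue & q, size_t n) {
        q.submit([&](sycl::handler & cgh) {
            sycl::local_accessor<float, 1> shared_mem_acc(sycl::range<1>(n), cgh);
            cgh.parallel_for(sycl::nd_range<1>(sycl::range<1>(n), sycl::range<1>(n)),
                [=](sycl::nd_item<1> it) {
                    float * shared = shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get();
                    shared[it.get_local_id(0)] = 0.0f;
                });
        }).wait();
    }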
From a4dd490069a66ae56b42127048f06757fc4de4f7 Mon Sep 17 00:00:00 2001
From: Radoslav Gerganov 
Date: Tue, 7 Jan 2025 08:37:02 +0200
Subject: [PATCH 59/81] rpc : code cleanup (#11107)

Remove duplicated macros, use GGML_LOG_ERROR for errors
---
 ggml/src/ggml-rpc/ggml-rpc.cpp | 49 ++++++++++++++--------------------
 1 file changed, 20 insertions(+), 29 deletions(-)

diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp
index 2213aba9f..63da2b86b 100644
--- a/ggml/src/ggml-rpc/ggml-rpc.cpp
+++ b/ggml/src/ggml-rpc/ggml-rpc.cpp
@@ -27,15 +27,6 @@
 #endif
 #include 
 
-#define UNUSED GGML_UNUSED
-
-#define GGML_DEBUG 0
-#if (GGML_DEBUG >= 1)
-#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG(...)
-#endif
-
 #ifdef _WIN32
 typedef SOCKET sockfd_t;
 using ssize_t = __int64;
@@ -411,7 +402,7 @@ static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) {
         initialized = true;
     }
 #else
-    UNUSED(initialized);
+    GGML_UNUSED(initialized);
 #endif
     auto sock = socket_connect(host.c_str(), port);
     if (sock == nullptr) {
@@ -640,7 +631,7 @@ static void ggml_backend_rpc_free(ggml_backend_t backend) {
 }
 
 static void ggml_backend_rpc_synchronize(ggml_backend_t backend) {
-    UNUSED(backend);
+    GGML_UNUSED(backend);
     // this is no-op because we don't have any async operations
 }
 
@@ -850,7 +841,7 @@ void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_
         GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, request.size, response.remote_ptr, response.remote_size);
         buffers.insert(buffer);
     } else {
-        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
+        GGML_LOG_ERROR("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
     }
 }
 
@@ -872,7 +863,7 @@ bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req & request, rp
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     void * base = ggml_backend_buffer_get_base(buffer);
@@ -884,7 +875,7 @@ bool rpc_server::free_buffer(const rpc_msg_free_buffer_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_free(buffer);
@@ -896,7 +887,7 @@ bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_clear(buffer, request.value);
@@ -952,7 +943,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1017,7 +1008,7 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1051,7 +1042,7 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co
     ggml_tensor * src = deserialize_tensor(ctx, &request.src);
     ggml_tensor * dst = deserialize_tensor(ctx, &request.dst);
     if (src == nullptr || dst == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1385,14 +1376,14 @@ static void ggml_backend_rpc_device_get_memory(ggml_backend_dev_t dev, size_t *
 
     ggml_backend_rpc_get_device_memory(ctx->endpoint.c_str(), free, total);
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static enum ggml_backend_dev_type ggml_backend_rpc_device_get_type(ggml_backend_dev_t dev) {
     // TODO: obtain value from the server
     return GGML_BACKEND_DEVICE_TYPE_GPU;
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static void ggml_backend_rpc_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
@@ -1413,7 +1404,7 @@ static ggml_backend_t ggml_backend_rpc_device_init(ggml_backend_dev_t dev, const
 
     return ggml_backend_rpc_init(ctx->endpoint.c_str());
 
-    UNUSED(params);
+    GGML_UNUSED(params);
 }
 
 static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_backend_dev_t dev) {
@@ -1421,12 +1412,12 @@ static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_b
 
     return ggml_backend_rpc_buffer_type(ctx->endpoint.c_str());
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static bool ggml_backend_rpc_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    UNUSED(dev);
-    UNUSED(op);
+    GGML_UNUSED(dev);
+    GGML_UNUSED(op);
     //TODO: call the remote backend and cache the results
     return true;
 }
@@ -1463,20 +1454,20 @@ static const struct ggml_backend_device_i ggml_backend_rpc_device_i = {
 static const char * ggml_backend_rpc_reg_get_name(ggml_backend_reg_t reg) {
     return "RPC";
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static size_t ggml_backend_rpc_reg_get_device_count(ggml_backend_reg_t reg) {
     return 0;
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static ggml_backend_dev_t ggml_backend_rpc_reg_get_device(ggml_backend_reg_t reg, size_t index) {
     GGML_ABORT("The RPC backend does not have enumerated devices - use ggml_backend_add_device instead");
 
-    UNUSED(reg);
-    UNUSED(index);
+    GGML_UNUSED(reg);
+    GGML_UNUSED(index);
 }
 
 static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const char * name) {
@@ -1485,7 +1476,7 @@ static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const ch
     }
     return NULL;
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static const struct ggml_backend_reg_i ggml_backend_rpc_reg_i = {

From a3d50bc022bedd6c7754c24749a1fef4d2d60c7c Mon Sep 17 00:00:00 2001
From: Diego Devesa 
Date: Tue, 7 Jan 2025 12:38:05 +0100
Subject: [PATCH 60/81] ggml-backend : only offload from host buffers (#11120)

---
 ggml/src/ggml-backend.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index e2d6c4056..d034f8b7f 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -761,7 +761,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         }
         // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
         // not an ideal solution
-        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
+        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && ggml_backend_buffer_is_host(src->buffer)) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
             if (src_backend_id == sched->n_backends - 1) {

From 017cc5f446863316d05522a87f25ec48713a9492 Mon Sep 17 00:00:00 2001
From: Diego Devesa 
Date: Tue, 7 Jan 2025 16:11:57 +0100
Subject: [PATCH 61/81] ggml-backend : only offload from host buffers (fix)
 (#11124)

---
 ggml/src/ggml-backend.cpp              | 4 ++--
 ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index d034f8b7f..dba7be33b 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -761,10 +761,10 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         }
         // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
         // not an ideal solution
-        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && ggml_backend_buffer_is_host(src->buffer)) {
+        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
-            if (src_backend_id == sched->n_backends - 1) {
+            if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
                 for (int b = 0; b < src_backend_id; b++) {
                     if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                         SET_CAUSE(tensor, "1.off");
diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
index 622c63f1f..b311a5b1c 100644
--- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
+++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
@@ -4169,6 +4169,8 @@ static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(g
     buffer->buft              = buft;
     buffer->iface.init_tensor = ggml_backend_cpu_aarch64_buffer_init_tensor;
     buffer->iface.set_tensor  = ggml_backend_cpu_aarch64_buffer_set_tensor;
+    buffer->iface.get_tensor  = nullptr;
+    buffer->iface.cpy_tensor  = nullptr;
     return buffer;
 }
 

From 53ff6b9b9fb25ed0ec0a213e05534fe7c3d0040f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= 
Date: Tue, 7 Jan 2025 18:01:58 +0100
Subject: [PATCH 62/81] GGUF: C++ refactor, backend support, misc fixes
 (#11030)

* GGUF: C++ refactor, backend support, misc fixes

remove ggml_tensor.backend

update CODEOWNERS [no ci]

remove gguf_get_data from API

revise GGUF API data types
---
 CODEOWNERS                                    |    6 +
 common/common.cpp                             |    3 +
 .../convert-llama2c-to-ggml.cpp               |    2 +
 .../cvector-generator/cvector-generator.cpp   |    4 +-
 examples/export-lora/export-lora.cpp          |    6 +-
 examples/gguf-hash/gguf-hash.cpp              |    1 +
 examples/gguf-split/gguf-split.cpp            |   14 +-
 examples/gguf/gguf.cpp                        |   16 +-
 examples/llava/clip.cpp                       |    6 +-
 ggml/CMakeLists.txt                           |    3 +-
 ggml/include/ggml-cpp.h                       |    1 +
 ggml/include/ggml.h                           |  140 --
 ggml/include/gguf.h                           |  202 +++
 ggml/src/CMakeLists.txt                       |    4 +-
 ggml/src/ggml-impl.h                          |   27 +-
 ggml/src/ggml.c                               | 1276 ----------------
 ggml/src/gguf.cpp                             | 1325 +++++++++++++++++
 src/llama-impl.cpp                            |    3 +-
 src/llama-model-loader.cpp                    |    9 +-
 src/llama-quant.cpp                           |    3 +-
 tests/test-gguf.cpp                           |  371 ++---
 21 files changed, 1795 insertions(+), 1627 deletions(-)
 create mode 100644 ggml/include/gguf.h
 create mode 100644 ggml/src/gguf.cpp

diff --git a/CODEOWNERS b/CODEOWNERS
index c9fa34761..72d594b46 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -3,3 +3,9 @@
 /ci/ @ggerganov
 /.devops/*.Dockerfile @ngxson
 /examples/server/ @ngxson
+/ggml/src/ggml-cuda/fattn* @JohannesGaessler
+/ggml/src/ggml-cuda/mmq.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmv.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmvq.* @JohannesGaessler
+/ggml/src/ggml-opt.cpp @JohannesGaessler
+/ggml/src/gguf.cpp @JohannesGaessler
diff --git a/common/common.cpp b/common/common.cpp
index 4fd36105e..86e4e1e24 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2,6 +2,9 @@
 #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
 #endif
 
+#include "ggml.h"
+#include "gguf.h"
+
 #include "common.h"
 #include "log.h"
 // Change JSON_ASSERT from assert() to GGML_ASSERT:
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 9c3a0c367..1256abb17 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -1,4 +1,6 @@
 #include "ggml.h"
+#include "gguf.h"
+
 #include "llama.h"
 #include "common.h"
 #include "log.h"
diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp
index 7c9f50228..e899c1078 100644
--- a/examples/cvector-generator/cvector-generator.cpp
+++ b/examples/cvector-generator/cvector-generator.cpp
@@ -1,7 +1,9 @@
+#include "ggml.h"
+#include "gguf.h"
+
 #include "arg.h"
 #include "common.h"
 #include "llama.h"
-#include "ggml.h"
 #include "pca.hpp"
 #include "mean.hpp"
 
diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp
index 058b5cc86..d5dcd20a0 100644
--- a/examples/export-lora/export-lora.cpp
+++ b/examples/export-lora/export-lora.cpp
@@ -1,7 +1,9 @@
-#include "arg.h"
-#include "common.h"
 #include "ggml.h"
 #include "ggml-alloc.h"
+#include "gguf.h"
+
+#include "arg.h"
+#include "common.h"
 
 #include 
 #include 
diff --git a/examples/gguf-hash/gguf-hash.cpp b/examples/gguf-hash/gguf-hash.cpp
index e96c75117..9523ec122 100644
--- a/examples/gguf-hash/gguf-hash.cpp
+++ b/examples/gguf-hash/gguf-hash.cpp
@@ -1,4 +1,5 @@
 #include "ggml.h"
+#include "gguf.h"
 
 #include    /* abort() */
 #include 
diff --git a/examples/gguf-split/gguf-split.cpp b/examples/gguf-split/gguf-split.cpp
index 9e3d44984..ef3ceb686 100644
--- a/examples/gguf-split/gguf-split.cpp
+++ b/examples/gguf-split/gguf-split.cpp
@@ -1,16 +1,18 @@
+#include "ggml.h"
+#include "gguf.h"
 #include "llama.h"
 #include "common.h"
 
 #include 
+#include 
+#include 
+#include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
-#include 
-
-#include 
-#include 
-#include 
 
 #if defined(_WIN32)
     #include 
@@ -296,7 +298,7 @@ struct split_strategy {
                 total_size += ggml_nbytes(t);
             }
             total_size = total_size / 1000 / 1000; // convert to megabytes
-            printf("split %05d: n_tensors = %d, total_size = %zuM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size);
+            printf("split %05d: n_tensors = %" PRIi64 ", total_size = %zuM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size);
             i_split++;
         }
     }
diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
index 7498f85ef..f31989c8c 100644
--- a/examples/gguf/gguf.cpp
+++ b/examples/gguf/gguf.cpp
@@ -1,10 +1,9 @@
 #include "ggml.h"
+#include "gguf.h"
 
 #include 
-#include 
 #include 
 #include 
-#include 
 #include 
 
 #undef MIN
@@ -135,9 +134,10 @@ static bool gguf_ex_read_0(const std::string & fname) {
 
         for (int i = 0; i < n_tensors; ++i) {
             const char * name   = gguf_get_tensor_name  (ctx, i);
+            const size_t size   = gguf_get_tensor_size  (ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
 
-            printf("%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
+            printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu\n", __func__, i, name, size, offset);
         }
     }
 
@@ -182,9 +182,10 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
 
         for (int i = 0; i < n_tensors; ++i) {
             const char * name   = gguf_get_tensor_name  (ctx, i);
+            const size_t size   = gguf_get_tensor_size  (ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
 
-            printf("%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
+            printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu\n", __func__, i, name, size, offset);
         }
     }
 
@@ -199,7 +200,8 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
 
             struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
 
-            printf("%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, ggml_n_dims(cur), cur->name, cur->data);
+            printf("%s: tensor[%d]: n_dims = %d, ne = (%d, %d, %d, %d), name = %s, data = %p\n",
+                __func__, i, ggml_n_dims(cur), int(cur->ne[0]), int(cur->ne[1]), int(cur->ne[2]), int(cur->ne[3]), cur->name, cur->data);
 
             // print first 10 elements
             const float * data = (const float *) cur->data;
@@ -215,7 +217,7 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
                 const float * data = (const float *) cur->data;
                 for (int j = 0; j < ggml_nelements(cur); ++j) {
                     if (data[j] != 100 + i) {
-                        fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
+                        fprintf(stderr, "%s: tensor[%d], data[%d]: found %f, expected %f\n", __func__, i, j, data[j], float(100 + i));
                         gguf_free(ctx);
                         return false;
                     }
@@ -245,6 +247,8 @@ int main(int argc, char ** argv) {
         check_data = false;
     }
 
+    srand(123456);
+
     const std::string fname(argv[1]);
     const std::string mode (argv[2]);
 
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 3cd0d2fa8..7a8a3156b 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -7,6 +7,7 @@
 #include "ggml-cpu.h"
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
+#include "gguf.h"
 
 //#ifdef GGML_USE_CUDA
 //#include "ggml-cuda.h"
@@ -262,7 +263,7 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
             {
                 const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                 int arr_n = gguf_get_arr_n(ctx_gguf, i);
-                const void * data = gguf_get_arr_data(ctx_gguf, i);
+                const void * data = arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx_gguf, i);
                 std::stringstream ss;
                 ss << "[";
                 for (int j = 0; j < arr_n; j++) {
@@ -2734,7 +2735,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
         total_size_org += orig_size;
         total_size_new += new_size;
         gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
-        gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
+        GGML_ASSERT(gguf_get_tensor_size(ctx_out, gguf_find_tensor(ctx_out, name.c_str())) == new_size);
+        gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
         fout.write((const char *)new_data, new_size);
         size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
         for (size_t j = 0; j < pad; ++j) {
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index 393506533..fe8acc803 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -243,7 +243,8 @@ set(GGML_PUBLIC_HEADERS
     include/ggml-metal.h
     include/ggml-rpc.h
     include/ggml-sycl.h
-    include/ggml-vulkan.h)
+    include/ggml-vulkan.h
+    include/gguf.h)
 
 set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
 #if (GGML_METAL)
diff --git a/ggml/include/ggml-cpp.h b/ggml/include/ggml-cpp.h
index 219361af4..a12342c25 100644
--- a/ggml/include/ggml-cpp.h
+++ b/ggml/include/ggml-cpp.h
@@ -7,6 +7,7 @@
 #include "ggml.h"
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
+#include "gguf.h"
 #include <memory>
 
 // Smart pointers for ggml types
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index c714fc8c8..8630d92c5 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -241,12 +241,6 @@
 #define GGML_ROPE_TYPE_MROPE  8
 #define GGML_ROPE_TYPE_VISION 24
 
-#define GGUF_MAGIC "GGUF"
-
-#define GGUF_VERSION 3
-
-#define GGUF_DEFAULT_ALIGNMENT 32
-
 #define GGML_UNUSED(x) (void)(x)
 
 #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
@@ -403,12 +397,6 @@ extern "C" {
         GGML_PREC_F32,
     };
 
-    enum ggml_backend_type {
-        GGML_BACKEND_TYPE_CPU = 0,
-        GGML_BACKEND_TYPE_GPU = 10,
-        GGML_BACKEND_TYPE_GPU_SPLIT = 20,
-    };
-
     // model file types
     enum ggml_ftype {
         GGML_FTYPE_UNKNOWN        = -1,
@@ -587,8 +575,6 @@ extern "C" {
     struct ggml_tensor {
         enum ggml_type type;
 
-        GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor");
-
         struct ggml_backend_buffer * buffer;
 
         int64_t ne[GGML_MAX_DIMS]; // number of elements
@@ -2111,132 +2097,6 @@ extern "C" {
                    int64_t   n_per_row,
                const float * imatrix);
 
-    //
-    // gguf
-    //
-
-    enum gguf_type {
-        GGUF_TYPE_UINT8   = 0,
-        GGUF_TYPE_INT8    = 1,
-        GGUF_TYPE_UINT16  = 2,
-        GGUF_TYPE_INT16   = 3,
-        GGUF_TYPE_UINT32  = 4,
-        GGUF_TYPE_INT32   = 5,
-        GGUF_TYPE_FLOAT32 = 6,
-        GGUF_TYPE_BOOL    = 7,
-        GGUF_TYPE_STRING  = 8,
-        GGUF_TYPE_ARRAY   = 9,
-        GGUF_TYPE_UINT64  = 10,
-        GGUF_TYPE_INT64   = 11,
-        GGUF_TYPE_FLOAT64 = 12,
-        GGUF_TYPE_COUNT,       // marks the end of the enum
-    };
-
-    struct gguf_context;
-
-    struct gguf_init_params {
-        bool no_alloc;
-
-        // if not NULL, create a ggml_context and allocate the tensor data in it
-        struct ggml_context ** ctx;
-    };
-
-    GGML_API struct gguf_context * gguf_init_empty(void);
-    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
-    //GGML_API struct gguf_context * gguf_init_from_buffer(..);
-
-    GGML_API void gguf_free(struct gguf_context * ctx);
-
-    GGML_API const char * gguf_type_name(enum gguf_type type);
-
-    GGML_API int    gguf_get_version    (const struct gguf_context * ctx);
-    GGML_API size_t gguf_get_alignment  (const struct gguf_context * ctx);
-    GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx);
-    GGML_API void * gguf_get_data       (const struct gguf_context * ctx);
-
-    GGML_API int          gguf_get_n_kv(const struct gguf_context * ctx);
-    GGML_API int          gguf_find_key(const struct gguf_context * ctx, const char * key);
-    GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id);
-
-    GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id);
-    GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id);
-
-    // will abort if the wrong type is used for the key
-    GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int key_id);
-    GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int key_id);
-    GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int key_id);
-    GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int key_id);
-    GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int key_id);
-    GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int key_id);
-    GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int key_id);
-    GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int key_id);
-    GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int key_id);
-    GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int key_id);
-    GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int key_id);
-    GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id);
-    GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id);
-    GGML_API int          gguf_get_arr_n   (const struct gguf_context * ctx, int key_id);
-    GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id);
-    GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i);
-
-    GGML_API int            gguf_get_n_tensors    (const struct gguf_context * ctx);
-    GGML_API int            gguf_find_tensor      (const struct gguf_context * ctx, const char * name);
-    GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int i);
-    GGML_API char *         gguf_get_tensor_name  (const struct gguf_context * ctx, int i);
-    GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int i);
-
-    // removes key if it exists
-    GGML_API void gguf_remove_key(struct gguf_context * ctx, const char * key);
-
-    // overrides existing values or adds a new one
-    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t  val);
-    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t   val);
-    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
-    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t  val);
-    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
-    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t  val);
-    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float    val);
-    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val);
-    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t  val);
-    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double   val);
-    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool     val);
-    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
-    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
-    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);
-
-    // set or add KV pairs from another context
-    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);
-
-    // manage tensor info
-    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
-    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
-    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
-
-    // writing gguf files can be done in 2 ways:
-    //
-    // - write the entire gguf_context to a binary file in a single pass:
-    //
-    //   gguf_write_to_file(ctx, fname);
-    //
-    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
-    //
-    //   FILE * f = fopen(fname, "wb");
-    //   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
-    //   fwrite(f, ...);
-    //   void * data = gguf_meta_get_meta_data(ctx);
-    //   fseek(f, 0, SEEK_SET);
-    //   fwrite(f, data, gguf_get_meta_size(ctx));
-    //   free(data);
-    //   fclose(f);
-    //
-
-    // write the entire context to a binary file
-    GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);
-
-    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
-    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
-    GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);
-
 #ifdef __cplusplus
     // restrict not standard in C++
 #    if defined(__GNUC__)
diff --git a/ggml/include/gguf.h b/ggml/include/gguf.h
new file mode 100644
index 000000000..79ee20206
--- /dev/null
+++ b/ggml/include/gguf.h
@@ -0,0 +1,202 @@
+// This file contains functionality related to "GGUF" files, the binary file format used by ggml.
+// GGUF files have the following structure:
+//
+// 1. File magic "GGUF" (4 bytes).
+// 2. File version (uint32_t).
+// 3. Number of ggml tensors in file (int64_t).
+// 4. Number of key-value-pairs in file (int64_t).
+// 5. For each KV pair:
+//   1. The key (string).
+//   2. The value type (gguf_type).
+//   3a. If the value type is GGUF_TYPE_ARRAY:
+//     1. The type of the array (gguf_type).
+//     2. The number of elements in the array (uint64_t).
+//     3. The binary representation of each element in the array.
+//   3b. Otherwise:
+//     1. The binary representation of the value.
+// 6. For each ggml tensor:
+//   1. The tensor name (string).
+//   2. The number of dimensions of the tensor (uint32_t).
+//   3. For each dimension:
+//     1. The size of the tensor in the dimension (int64_t).
+//   4. The tensor data type (ggml_type).
+//   5. The tensor data offset in the tensor data binary blob (uint64_t).
+// 7. The tensor data binary blob (optional, aligned).
+//
+// Strings are serialized as the string length (uint64_t) followed by the C string without the null terminator.
+// All enums are stored as int32_t.
+// All bool values are stored as int8_t.
+// If the special key "general.alignment" (uint32_t) is defined it is used for alignment,
+//   otherwise GGUF_DEFAULT_ALIGNMENT is used.
+//
+// Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de)
+
+#pragma once
+
+#include "ggml.h"
+
+#include 
+#include 
+
+#define GGUF_MAGIC   "GGUF"
+#define GGUF_VERSION 3
+
+#define GGUF_KEY_GENERAL_ALIGNMENT "general.alignment"
+
+#define GGUF_DEFAULT_ALIGNMENT 32
+
+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+    // types that can be stored as GGUF KV data
+    enum gguf_type {
+        GGUF_TYPE_UINT8   = 0,
+        GGUF_TYPE_INT8    = 1,
+        GGUF_TYPE_UINT16  = 2,
+        GGUF_TYPE_INT16   = 3,
+        GGUF_TYPE_UINT32  = 4,
+        GGUF_TYPE_INT32   = 5,
+        GGUF_TYPE_FLOAT32 = 6,
+        GGUF_TYPE_BOOL    = 7,
+        GGUF_TYPE_STRING  = 8,
+        GGUF_TYPE_ARRAY   = 9,
+        GGUF_TYPE_UINT64  = 10,
+        GGUF_TYPE_INT64   = 11,
+        GGUF_TYPE_FLOAT64 = 12,
+        GGUF_TYPE_COUNT,       // marks the end of the enum
+    };
+
+    struct gguf_context;
+
+    struct gguf_init_params {
+        bool no_alloc;
+
+        // if not NULL, create a ggml_context and allocate the tensor data in it
+        struct ggml_context ** ctx;
+    };
+
+    GGML_API struct gguf_context * gguf_init_empty(void);
+    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
+    //GGML_API struct gguf_context * gguf_init_from_buffer(..);
+
+    GGML_API void gguf_free(struct gguf_context * ctx);
+
+    GGML_API const char * gguf_type_name(enum gguf_type type);
+
+    GGML_API uint32_t gguf_get_version    (const struct gguf_context * ctx);
+    GGML_API size_t   gguf_get_alignment  (const struct gguf_context * ctx);
+    GGML_API size_t   gguf_get_data_offset(const struct gguf_context * ctx);
+
+    GGML_API int64_t      gguf_get_n_kv(const struct gguf_context * ctx);
+    GGML_API int64_t      gguf_find_key(const struct gguf_context * ctx, const char * key); // returns -1 if key is not found
+    GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int64_t key_id);
+
+    GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id);
+
+    // will abort if the wrong type is used for the key
+    GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int64_t key_id);
+    GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id);
+    GGML_API size_t       gguf_get_arr_n   (const struct gguf_context * ctx, int64_t key_id);
+
+    // get raw pointer to the first element of the array with the given key_id
+    // for bool arrays, note that they are always stored as int8 on all platforms (usually this makes no difference)
+    GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id);
+
+    // get ith C string from array with given key_id
+    GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i);
+
+    GGML_API int64_t        gguf_get_n_tensors    (const struct gguf_context * ctx);
+    GGML_API int64_t        gguf_find_tensor      (const struct gguf_context * ctx, const char * name); // returns -1 if the tensor is not found
+    GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int64_t tensor_id);
+    GGML_API const char *   gguf_get_tensor_name  (const struct gguf_context * ctx, int64_t tensor_id);
+    GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int64_t tensor_id);
+    GGML_API size_t         gguf_get_tensor_size  (const struct gguf_context * ctx, int64_t tensor_id);
+
+    // removes key if it exists, returns id that the key had prior to removal (-1 if it didn't exist)
+    GGML_API int64_t gguf_remove_key(struct gguf_context * ctx, const char * key);
+
+    // overrides an existing KV pair or adds a new one, the new KV pair is always at the back
+    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t      val);
+    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t       val);
+    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t     val);
+    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t      val);
+    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t     val);
+    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t      val);
+    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float        val);
+    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t     val);
+    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t      val);
+    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double       val);
+    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool         val);
+    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
+
+    // creates a new array with n elements of the given type and copies the corresponding number of bytes from data
+    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, size_t n);
+
+    // creates a new array with n strings and copies the corresponding strings from data
+    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, size_t n);
+
+    // set or add KV pairs from another context
+    GGML_API void gguf_set_kv(struct gguf_context * ctx, const struct gguf_context * src);
+
+    // add tensor to GGUF context, tensor name must be unique
+    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
+
+    // after changing a tensor's type, the offsets of all tensors with higher indices are immediately recalculated
+    //   in such a way that the tensor data remains as one contiguous block (except for padding)
+    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
+
+    // assumes that at least gguf_get_tensor_size bytes can be read from data
+    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data);
+
+    // writing gguf files can be done in 3 ways:
+    //
+    // - write the entire gguf_context to a binary file in a single pass:
+    //
+    //   gguf_write_to_file(ctx, fname, /*only_meta =*/ false);
+    //
+    // - write only the meta data to a file, then re-open the file and append the tensor data:
+    //
+    //   gguf_write_to_file(ctx, fname, /*only_meta =*/ true);
+    //   FILE * f = fopen(fname, "ab");
+    //   fwrite(f, ...); // write tensor data
+    //   fclose(f);
+    //
+    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
+    //
+    //   FILE * f = fopen(fname, "wb");
+    //   const size_t size_meta = gguf_get_meta_size(ctx);
+    //   fseek(f, size_meta, SEEK_SET);
+    //   fwrite(f, ...); // write tensor data
+    //   void * data = malloc(size_meta);
+    //   gguf_get_meta_data(ctx, data);
+    //   rewind(f);
+//   fwrite(data, 1, size_meta, f);
+    //   free(data);
+    //   fclose(f);
+    //
+
+    // write the entire context to a binary file
+    GGML_API bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);
+
+    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
+    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
+
+    // writes the meta data to pointer "data"
+    GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);
+
+#ifdef  __cplusplus
+}
+#endif
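For illustration only (not part of the patch): a minimal sketch that exercises the reading side
of the API declared above. The file name is hypothetical and error handling is kept to the
essentials.

    #include "ggml.h"
    #include "gguf.h"

    #include <cinttypes>
    #include <cstdio>

    int main(void) {
        struct gguf_init_params params = { /*no_alloc =*/ true, /*ctx =*/ nullptr };
        struct gguf_context * ctx = gguf_init_from_file("model.gguf", params);
        if (!ctx) {
            fprintf(stderr, "failed to load model.gguf\n");
            return 1;
        }
        printf("version: %" PRIu32 ", alignment: %zu\n", gguf_get_version(ctx), gguf_get_alignment(ctx));
        const int64_t n_tensors = gguf_get_n_tensors(ctx);
        for (int64_t i = 0; i < n_tensors; ++i) {
            printf("tensor %" PRIi64 ": %s, %zu bytes at offset %zu\n", i,
                gguf_get_tensor_name(ctx, i), gguf_get_tensor_size(ctx, i), gguf_get_tensor_offset(ctx, i));
        }
        gguf_free(ctx);
        return 0;
    }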
diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index 84101c32c..ae1cd2337 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -208,6 +208,7 @@ add_library(ggml-base
             ../include/ggml-backend.h
             ../include/ggml-cpp.h
             ../include/ggml-opt.h
+            ../include/gguf.h
             ggml.c
             ggml-alloc.c
             ggml-backend.cpp
@@ -215,7 +216,8 @@ add_library(ggml-base
             ggml-threading.cpp
             ggml-threading.h
             ggml-quants.c
-            ggml-quants.h)
+            ggml-quants.h
+            gguf.cpp)
 
 target_include_directories(ggml-base PRIVATE .)
 
diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h
index 549772c57..eab017889 100644
--- a/ggml/src/ggml-impl.h
+++ b/ggml/src/ggml-impl.h
@@ -3,6 +3,8 @@
 // GGML internal header
 
 #include "ggml.h"
+#include "gguf.h"
+
 #include 
 #include 
 #include  // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
@@ -551,22 +553,15 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
 #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
 #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
 
-// expose GGUF internals for test code
-
-GGML_API size_t gguf_type_size(enum gguf_type type);
-
-GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
-
-struct gguf_buf {
-    void * data;
-    size_t size;
-    size_t offset;
-};
-GGML_API struct gguf_buf gguf_buf_init(size_t size);
-GGML_API void gguf_buf_free(struct gguf_buf buf);
-
-GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta);
-
 #ifdef __cplusplus
 }
 #endif
+
+#ifdef __cplusplus
+#include <vector>
+
+// expose GGUF internals for test code
+GGML_API size_t gguf_type_size(enum gguf_type type);
+GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
+GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);
+#endif // __cplusplus
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 2bbe5f482..90abc6ad4 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1588,15 +1588,8 @@ static struct ggml_tensor * ggml_new_tensor_impl(
 
     struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
 
-#ifdef __clang__
-    // temporary until ggml_tensor::backend is removed
-    #pragma clang diagnostic push
-    #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-#endif
-
     *result = (struct ggml_tensor) {
         /*.type         =*/ type,
-        /*.backend      =*/ GGML_BACKEND_TYPE_CPU,
         /*.buffer       =*/ NULL,
         /*.ne           =*/ { 1, 1, 1, 1 },
         /*.nb           =*/ { 0, 0, 0, 0 },
@@ -1612,10 +1605,6 @@ static struct ggml_tensor * ggml_new_tensor_impl(
         /*.padding      =*/ { 0 },
     };
 
-#ifdef __clang__
-    #pragma clang diagnostic pop
-#endif
-
     // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
     //GGML_ASSERT_ALIGNED(result->data);
 
@@ -6417,1271 +6406,6 @@ size_t ggml_quantize_chunk(
 
 ////////////////////////////////////////////////////////////////////////////////
 
-struct gguf_str {
-    uint64_t n;  // GGUFv2
-    char * data;
-};
-
-static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
-    [GGUF_TYPE_UINT8]   = sizeof(uint8_t),
-    [GGUF_TYPE_INT8]    = sizeof(int8_t),
-    [GGUF_TYPE_UINT16]  = sizeof(uint16_t),
-    [GGUF_TYPE_INT16]   = sizeof(int16_t),
-    [GGUF_TYPE_UINT32]  = sizeof(uint32_t),
-    [GGUF_TYPE_INT32]   = sizeof(int32_t),
-    [GGUF_TYPE_FLOAT32] = sizeof(float),
-    [GGUF_TYPE_BOOL]    = sizeof(bool),
-    [GGUF_TYPE_STRING]  = sizeof(struct gguf_str),
-    [GGUF_TYPE_UINT64]  = sizeof(uint64_t),
-    [GGUF_TYPE_INT64]   = sizeof(int64_t),
-    [GGUF_TYPE_FLOAT64] = sizeof(double),
-    [GGUF_TYPE_ARRAY]   = 0, // undefined
-};
-static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
-
-static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
-    [GGUF_TYPE_UINT8]   = "u8",
-    [GGUF_TYPE_INT8]    = "i8",
-    [GGUF_TYPE_UINT16]  = "u16",
-    [GGUF_TYPE_INT16]   = "i16",
-    [GGUF_TYPE_UINT32]  = "u32",
-    [GGUF_TYPE_INT32]   = "i32",
-    [GGUF_TYPE_FLOAT32] = "f32",
-    [GGUF_TYPE_BOOL]    = "bool",
-    [GGUF_TYPE_STRING]  = "str",
-    [GGUF_TYPE_ARRAY]   = "arr",
-    [GGUF_TYPE_UINT64]  = "u64",
-    [GGUF_TYPE_INT64]   = "i64",
-    [GGUF_TYPE_FLOAT64] = "f64",
-};
-static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
-
-union gguf_value {
-    uint8_t  uint8;
-    int8_t   int8;
-    uint16_t uint16;
-    int16_t  int16;
-    uint32_t uint32;
-    int32_t  int32;
-    float    float32;
-    uint64_t uint64;
-    int64_t  int64;
-    double   float64;
-    bool     bool_;
-
-    struct gguf_str str;
-
-    struct {
-        enum gguf_type type;
-
-        uint64_t n;  // GGUFv2
-        void * data;
-    } arr;
-};
-
-struct gguf_kv {
-    struct gguf_str key;
-
-    enum  gguf_type  type;
-    union gguf_value value;
-};
-
-struct gguf_header {
-    char magic[4];
-
-    uint32_t version;
-    uint64_t n_tensors; // GGUFv2
-    uint64_t n_kv;      // GGUFv2
-};
-
-struct gguf_tensor_info {
-    struct gguf_str name;
-
-    uint32_t n_dims;
-    uint64_t ne[GGML_MAX_DIMS];
-
-    enum ggml_type type;
-
-    uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
-
-    // for writing API
-    const void * data;
-    size_t size;
-};
-
-struct gguf_context {
-    struct gguf_header header;
-
-    struct gguf_kv          * kv;
-    struct gguf_tensor_info * infos;
-
-    size_t alignment;
-    size_t offset;    // offset of `data` from beginning of file
-    size_t size;      // size of `data` in bytes
-
-    //uint8_t * padding;
-    void * data;
-};
-
-size_t gguf_type_size(enum gguf_type type) {
-    GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT);
-    return GGUF_TYPE_SIZE[type];
-}
-
-static bool gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
-    if (info->n_dims > GGML_MAX_DIMS) {
-        fprintf(stderr, "%s: invalid number of dimensions (%" PRIu32 ")\n", __func__, info->n_dims);
-        return false;
-    }
-
-    if (info->type < 0 || info->type >= GGML_TYPE_COUNT) {
-        fprintf(stderr, "%s: invalid type (%d)\n", __func__, info->type);
-        return false;
-    }
-
-    if (strlen(info->name.data) >= GGML_MAX_NAME) {
-        fprintf(stderr, "%s: tensor '%s' name is too long\n", __func__, info->name.data);
-        return false;
-    }
-
-    for (uint32_t i = 0; i < info->n_dims; ++i) {
-        if (info->ne[i] <= 0) {
-            fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[i]);
-            return false;
-        }
-    }
-
-    // prevent overflow for total number of elements
-    if (INT64_MAX/info->ne[1] <= info->ne[0]) {
-        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[1]);
-        return false;
-    }
-
-    if (INT64_MAX/info->ne[2] <= info->ne[0]*info->ne[1]) {
-        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[2]);
-        return false;
-    }
-
-    if (INT64_MAX/info->ne[3] <= info->ne[0]*info->ne[1]*info->ne[2]) {
-        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[3]);
-        return false;
-    }
-
-    return true;
-}
-
-static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
-    const size_t n = fread(dst, 1, size, file);
-    *offset += n;
-    return n == size;
-}
-
-static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
-    p->n    = 0;
-    p->data = NULL;
-
-    bool ok = true;
-
-    ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset);
-
-    // early exit if string length is invalid, prevents from integer overflow
-    if (p->n == SIZE_MAX) {
-        fprintf(stderr, "%s: invalid string length (%" PRIu64 ")\n", __func__, p->n);
-        return false;
-    }
-
-    p->data = calloc(p->n + 1, 1);
-    if (!p->data) {
-        fprintf(stderr, "%s: failed to allocate memory for string of length %" PRIu64 "\n", __func__, p->n);
-        return false;
-    }
-
-    ok = ok && gguf_fread_el(file,  p->data, p->n, offset);
-
-    return ok;
-}
-
-static void gguf_free_kv(struct gguf_kv * kv) {
-    if (kv->key.data) {
-        GGML_FREE(kv->key.data);
-    }
-
-    if (kv->type == GGUF_TYPE_STRING) {
-        if (kv->value.str.data) {
-            GGML_FREE(kv->value.str.data);
-        }
-    }
-
-    if (kv->type == GGUF_TYPE_ARRAY) {
-        if (kv->value.arr.data) {
-            if (kv->value.arr.type == GGUF_TYPE_STRING) {
-                for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
-                    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
-                    if (str->data) {
-                        GGML_FREE(str->data);
-                    }
-                }
-            }
-            GGML_FREE(kv->value.arr.data);
-        }
-    }
-}
-
-struct gguf_context * gguf_init_empty(void) {
-    struct gguf_context * ctx = calloc(1, sizeof(struct gguf_context));
-    if (!ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for context\n", __func__);
-        return NULL;
-    }
-
-    memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
-    ctx->header.version   = GGUF_VERSION;
-    ctx->header.n_tensors = 0;
-    ctx->header.n_kv      = 0;
-
-    ctx->kv    = NULL;
-    ctx->infos = NULL;
-
-    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
-    ctx->offset    = 0;
-    ctx->size      = 0;
-
-    ctx->data = NULL;
-
-    return ctx;
-}
-
-struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params) {
-    // offset from start of file
-    size_t offset = 0;
-
-    char magic[4];
-
-    // check the magic before making allocations
-    {
-        gguf_fread_el(file, &magic, sizeof(magic), &offset);
-
-        for (uint32_t i = 0; i < sizeof(magic); i++) {
-            if (magic[i] != GGUF_MAGIC[i]) {
-                fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
-                return NULL;
-            }
-        }
-    }
-
-    bool ok = true;
-
-    struct gguf_context * ctx = calloc(1, sizeof(struct gguf_context));
-    if (!ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for context\n", __func__);
-        return NULL;
-    }
-
-    // read the header
-    {
-        strncpy(ctx->header.magic, magic, 4);
-
-        ctx->kv    = NULL;
-        ctx->infos = NULL;
-        ctx->data  = NULL;
-
-        ok = ok && gguf_fread_el(file, &ctx->header.version,   sizeof(ctx->header.version),   &offset);
-        ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
-        ok = ok && gguf_fread_el(file, &ctx->header.n_kv,      sizeof(ctx->header.n_kv),      &offset);
-
-        if (ctx->header.version == 1) {
-            fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        // sanity-checks to prevent from integer/buffer overflows
-
-        ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/sizeof(struct gguf_tensor_info));
-        ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/ggml_tensor_overhead());
-        ok = ok && (ctx->header.n_kv      < (SIZE_MAX/2)/sizeof(struct gguf_kv));
-
-        if (!ok) {
-            fprintf(stderr, "%s: failed to read header\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-    }
-
-    // read the kv pairs
-    {
-        const uint64_t n_kv = ctx->header.n_kv;
-
-        if (n_kv > 0) {
-            ctx->kv = calloc(n_kv, sizeof(struct gguf_kv));
-            if (!ctx->kv) {
-                fprintf(stderr, "%s: failed to allocate memory for kv pairs\n", __func__);
-                gguf_free(ctx);
-                return NULL;
-            }
-        }
-
-        for (uint64_t i = 0; i < n_kv; ++i) {
-            struct gguf_kv * kv = &ctx->kv[i];
-
-            //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
-
-            ok = ok && gguf_fread_str(file, &kv->key,                    &offset);
-            ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
-
-            //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
-
-            switch (kv->type) {
-                case GGUF_TYPE_UINT8:   ok = ok && gguf_fread_el (file, &kv->value.uint8,   sizeof(kv->value.uint8),   &offset); break;
-                case GGUF_TYPE_INT8:    ok = ok && gguf_fread_el (file, &kv->value.int8,    sizeof(kv->value.int8),    &offset); break;
-                case GGUF_TYPE_UINT16:  ok = ok && gguf_fread_el (file, &kv->value.uint16,  sizeof(kv->value.uint16),  &offset); break;
-                case GGUF_TYPE_INT16:   ok = ok && gguf_fread_el (file, &kv->value.int16,   sizeof(kv->value.int16),   &offset); break;
-                case GGUF_TYPE_UINT32:  ok = ok && gguf_fread_el (file, &kv->value.uint32,  sizeof(kv->value.uint32),  &offset); break;
-                case GGUF_TYPE_INT32:   ok = ok && gguf_fread_el (file, &kv->value.int32,   sizeof(kv->value.int32),   &offset); break;
-                case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
-                case GGUF_TYPE_UINT64:  ok = ok && gguf_fread_el (file, &kv->value.uint64,  sizeof(kv->value.uint64),  &offset); break;
-                case GGUF_TYPE_INT64:   ok = ok && gguf_fread_el (file, &kv->value.int64,   sizeof(kv->value.int64),   &offset); break;
-                case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
-                case GGUF_TYPE_BOOL:    ok = ok && gguf_fread_el (file, &kv->value.bool_,   sizeof(kv->value.bool_),   &offset); break;
-                case GGUF_TYPE_STRING:  ok = ok && gguf_fread_str(file, &kv->value.str,                                &offset); break;
-                case GGUF_TYPE_ARRAY:
-                    {
-                        ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
-                        ok = ok && gguf_fread_el(file, &kv->value.arr.n,    sizeof(kv->value.arr.n),    &offset);
-
-                        switch (kv->value.arr.type) {
-                            case GGUF_TYPE_UINT8:
-                            case GGUF_TYPE_INT8:
-                            case GGUF_TYPE_UINT16:
-                            case GGUF_TYPE_INT16:
-                            case GGUF_TYPE_UINT32:
-                            case GGUF_TYPE_INT32:
-                            case GGUF_TYPE_FLOAT32:
-                            case GGUF_TYPE_UINT64:
-                            case GGUF_TYPE_INT64:
-                            case GGUF_TYPE_FLOAT64:
-                            case GGUF_TYPE_BOOL:
-                                {
-                                    // prevent from integer overflow in the malloc below
-                                    if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) {
-                                        fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    kv->value.arr.data = calloc(kv->value.arr.n, gguf_type_size(kv->value.arr.type));
-                                    if (!kv->value.arr.data) {
-                                        fprintf(stderr, "%s: failed to allocate memory for array\n", __func__);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset);
-                                } break;
-                            case GGUF_TYPE_STRING:
-                                {
-                                    // prevent from integer overflow in the malloc below
-                                    if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) {
-                                        fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    kv->value.arr.data = calloc(kv->value.arr.n, sizeof(struct gguf_str));
-                                    if (!kv->value.arr.data) {
-                                        fprintf(stderr, "%s: failed to allocate memory for array\n", __func__);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
-                                        ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
-                                    }
-                                } break;
-                            case GGUF_TYPE_ARRAY:
-                            default:
-                                {
-                                    fprintf(stderr, "%s: invalid array type %d\n", __func__, kv->value.arr.type);
-                                    ok = false;
-                                } break;
-                        }
-                    } break;
-                default:
-                    {
-                        fprintf(stderr, "%s: invalid type %d\n", __func__, kv->type);
-                        ok = false;
-                    } break;
-            }
-
-            if (!ok) {
-                break;
-            }
-        }
-
-        if (!ok) {
-            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-    }
-
-    // read the tensor infos
-    if (ctx->header.n_tensors > 0) {
-        ctx->infos = calloc(ctx->header.n_tensors, sizeof(struct gguf_tensor_info));
-        if (!ctx->infos) {
-            fprintf(stderr, "%s: failed to allocate memory for tensor infos\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            struct gguf_tensor_info * info = &ctx->infos[i];
-
-            for (int j = 0; j < GGML_MAX_DIMS; ++j) {
-                info->ne[j] = 1;
-            }
-
-            ok = ok && gguf_fread_str(file, &info->name,                          &offset);
-            ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims),  &offset);
-
-            ok = ok && (info->n_dims <= GGML_MAX_DIMS);
-
-            for (uint32_t j = 0; j < info->n_dims; ++j) {
-                ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
-            }
-
-            ok = ok && gguf_fread_el (file, &info->type,   sizeof(info->type),    &offset);
-            ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset),  &offset);
-
-            ok = ok && gguf_tensor_info_sanitize(info);
-
-            // make sure there is no duplicated tensor names
-            for (uint64_t j = 0; j < i && ok; ++j) {
-                if (strcmp(info->name.data, ctx->infos[j].name.data) == 0) {
-                    fprintf(stderr, "%s: duplicated tensor name %s\n", __func__, info->name.data);
-                    ok = false;
-                }
-            }
-
-            if (!ok) {
-                fprintf(stderr, "%s: failed to read tensor info\n", __func__);
-                gguf_free(ctx);
-                return NULL;
-            }
-        }
-    }
-
-    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
-
-    int alignment_idx = gguf_find_key(ctx, "general.alignment");
-    if (alignment_idx != -1) {
-        ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
-    }
-
-    // we require the data section to be aligned, so take into account any padding
-    {
-        const size_t offset_pad = offset % ctx->alignment;
-
-        if (offset_pad != 0) {
-            offset += ctx->alignment - offset_pad;
-            fseek(file, offset, SEEK_SET);
-        }
-    }
-
-    // store the current file offset - this is where the data section starts
-    ctx->offset = offset;
-
-    // compute the total size of the data section, taking into account the alignment
-    {
-        ctx->size = 0;
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            struct gguf_tensor_info * info = &ctx->infos[i];
-
-            const int64_t ne =
-                (int64_t) info->ne[0] *
-                (int64_t) info->ne[1] *
-                (int64_t) info->ne[2] *
-                (int64_t) info->ne[3];
-
-            if (ggml_blck_size(info->type) == 0 ) {
-                // this tensor type support have been removed:
-                fprintf(stderr, "%s: tensor '%s' of type %d: %s\n",
-                        __func__, info->name.data, (int) info->type, ggml_type_name(info->type));
-                gguf_free(ctx);
-                return NULL;
-            }
-
-            if (ne % ggml_blck_size(info->type) != 0) {
-                fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%" PRId64 ")\n",
-                        __func__, info->name.data, (int) info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
-                gguf_free(ctx);
-                return NULL;
-            }
-
-            const size_t size_cur = ggml_row_size(info->type, ne);
-
-            ctx->size += GGML_PAD(size_cur, ctx->alignment);
-        }
-    }
-
-    // load the tensor data only if requested
-    if (params.ctx != NULL) {
-        // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
-        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
-        // the ggml_tensor structs to the appropriate locations in the binary blob
-
-        // compute the exact size needed for the new ggml_context
-        const size_t mem_size =
-            params.no_alloc ?
-            (ctx->header.n_tensors    )*ggml_tensor_overhead() :
-            (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
-
-        struct ggml_init_params pdata = {
-            .mem_size   = mem_size,
-            .mem_buffer = NULL,
-            .no_alloc   = params.no_alloc,
-        };
-
-        *params.ctx = ggml_init(pdata);
-        if (*params.ctx == NULL) {
-            fprintf(stderr, "%s: failed to initialize context\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        struct ggml_context * ctx_data = *params.ctx;
-
-        struct ggml_tensor * data = NULL;
-
-        if (!params.no_alloc) {
-            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
-
-            ok = ok && data != NULL;
-
-            // read the binary blob with the tensor data
-            ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
-
-            if (!ok) {
-                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
-                ggml_free(ctx_data);
-                gguf_free(ctx);
-                return NULL;
-            }
-
-            ctx->data = data->data;
-        }
-
-        ggml_set_no_alloc(ctx_data, true);
-
-        // create the tensors
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            const int64_t ne[GGML_MAX_DIMS] = {
-                ctx->infos[i].ne[0],
-                ctx->infos[i].ne[1],
-                ctx->infos[i].ne[2],
-                ctx->infos[i].ne[3],
-            };
-
-            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
-
-            ok = ok && cur != NULL;
-
-            if (!ok) {
-                break;
-            }
-
-            ggml_set_name(cur, ctx->infos[i].name.data);
-
-            // point the data member to the appropriate location in the binary blob using the tensor infos
-            if (!params.no_alloc) {
-              //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
-                cur->data = (char *) data->data + ctx->infos[i].offset;               // offset from data
-            }
-        }
-
-        if (!ok) {
-            fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
-            ggml_free(ctx_data);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        ggml_set_no_alloc(ctx_data, params.no_alloc);
-    }
-
-    return ctx;
-}
-
-struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
-    FILE * file = ggml_fopen(fname, "rb");
-    if (!file) {
-        fprintf(stderr, "%s: failed to open '%s': '%s'\n", __func__, fname, strerror(errno));
-        return NULL;
-    }
-
-    struct gguf_context * result = gguf_init_from_file_impl(file, params);
-    fclose(file);
-    return result;
-}
-
-void gguf_free(struct gguf_context * ctx) {
-    if (ctx == NULL) {
-        return;
-    }
-
-    if (ctx->kv) {
-        // free string memory - not great..
-        for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
-            gguf_free_kv(&ctx->kv[i]);
-        }
-
-        GGML_FREE(ctx->kv);
-    }
-
-    if (ctx->infos) {
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            struct gguf_tensor_info * info = &ctx->infos[i];
-
-            if (info->name.data) {
-                GGML_FREE(info->name.data);
-            }
-        }
-
-        GGML_FREE(ctx->infos);
-    }
-
-    GGML_FREE(ctx);
-}
-
-const char * gguf_type_name(enum gguf_type type) {
-    return GGUF_TYPE_NAME[type];
-}
-
-int gguf_get_version(const struct gguf_context * ctx) {
-    return ctx->header.version;
-}
-
-size_t gguf_get_alignment(const struct gguf_context * ctx) {
-    return ctx->alignment;
-}
-
-size_t gguf_get_data_offset(const struct gguf_context * ctx) {
-    return ctx->offset;
-}
-
-void * gguf_get_data(const struct gguf_context * ctx) {
-    return ctx->data;
-}
-
-int gguf_get_n_kv(const struct gguf_context * ctx) {
-    return ctx->header.n_kv;
-}
-
-int gguf_find_key(const struct gguf_context * ctx, const char * key) {
-    // return -1 if key not found
-    int keyfound = -1;
-
-    const int n_kv = gguf_get_n_kv(ctx);
-
-    for (int i = 0; i < n_kv; ++i) {
-        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
-            keyfound = i;
-            break;
-        }
-    }
-
-    return keyfound;
-}
-
-const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    return ctx->kv[key_id].key.data;
-}
-
-enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    return ctx->kv[key_id].type;
-}
-
-enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    return ctx->kv[key_id].value.arr.type;
-}
-
-const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    return ctx->kv[key_id].value.arr.data;
-}
-
-const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    struct gguf_kv * kv = &ctx->kv[key_id];
-    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
-    return str->data;
-}
-
-int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    return ctx->kv[key_id].value.arr.n;
-}
-
-uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
-    return ctx->kv[key_id].value.uint8;
-}
-
-int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
-    return ctx->kv[key_id].value.int8;
-}
-
-uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
-    return ctx->kv[key_id].value.uint16;
-}
-
-int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
-    return ctx->kv[key_id].value.int16;
-}
-
-uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
-    return ctx->kv[key_id].value.uint32;
-}
-
-int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
-    return ctx->kv[key_id].value.int32;
-}
-
-float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
-    return ctx->kv[key_id].value.float32;
-}
-
-uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
-    return ctx->kv[key_id].value.uint64;
-}
-
-int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
-    return ctx->kv[key_id].value.int64;
-}
-
-double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
-    return ctx->kv[key_id].value.float64;
-}
-
-bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
-    return ctx->kv[key_id].value.bool_;
-}
-
-const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
-    return ctx->kv[key_id].value.str.data;
-}
-
-const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
-    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
-    return &ctx->kv[key_id].value;
-}
-
-int gguf_get_n_tensors(const struct gguf_context * ctx) {
-    return ctx->header.n_tensors;
-}
-
-int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
-    // return -1 if tensor not found
-    int tensorfound = -1;
-
-    const int n_tensors = gguf_get_n_tensors(ctx);
-
-    for (int i = 0; i < n_tensors; ++i) {
-        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
-            tensorfound = i;
-            break;
-        }
-    }
-
-    return tensorfound;
-}
-
-size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
-    return ctx->infos[i].offset;
-}
-
-char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
-    return ctx->infos[i].name.data;
-}
-
-enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) {
-    return ctx->infos[i].type;
-}
-
-// returns the index
-static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
-    const int idx = gguf_find_key(ctx, key);
-    if (idx >= 0) {
-        return idx;
-    }
-
-    const int n_kv = gguf_get_n_kv(ctx);
-
-    ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
-    ctx->kv[n_kv].key.n    = strlen(key);
-    ctx->kv[n_kv].key.data = strdup(key);
-    ctx->header.n_kv++;
-
-    return n_kv;
-}
-
-void gguf_remove_key(struct gguf_context * ctx, const char * key) {
-    const int idx = gguf_find_key(ctx, key);
-    if (idx >= 0) {
-        const int n_kv = gguf_get_n_kv(ctx);
-        gguf_free_kv(&ctx->kv[idx]);
-        for (int i = idx; i < n_kv-1; ++i) {
-            ctx->kv[i] = ctx->kv[i+1];
-        }
-        ctx->kv = realloc(ctx->kv, (n_kv - 1) * sizeof(struct gguf_kv));
-        ctx->header.n_kv--;
-    }
-}
-
-void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_UINT8;
-    ctx->kv[idx].value.uint8 = val;
-}
-
-void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type       = GGUF_TYPE_INT8;
-    ctx->kv[idx].value.int8 = val;
-}
-
-void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type         = GGUF_TYPE_UINT16;
-    ctx->kv[idx].value.uint16 = val;
-}
-
-void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_INT16;
-    ctx->kv[idx].value.int16 = val;
-}
-
-void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type         = GGUF_TYPE_UINT32;
-    ctx->kv[idx].value.uint32 = val;
-}
-
-void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_INT32;
-    ctx->kv[idx].value.int32 = val;
-}
-
-void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type          = GGUF_TYPE_FLOAT32;
-    ctx->kv[idx].value.float32 = val;
-}
-
-void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type         = GGUF_TYPE_UINT64;
-    ctx->kv[idx].value.uint64 = val;
-}
-
-void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_INT64;
-    ctx->kv[idx].value.int64 = val;
-}
-
-void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type          = GGUF_TYPE_FLOAT64;
-    ctx->kv[idx].value.float64 = val;
-}
-
-void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_BOOL;
-    ctx->kv[idx].value.bool_ = val;
-}
-
-void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type           = GGUF_TYPE_STRING;
-    ctx->kv[idx].value.str.n    = strlen(val);
-    ctx->kv[idx].value.str.data = strdup(val);
-}
-
-void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
-    ctx->kv[idx].value.arr.type = type;
-    ctx->kv[idx].value.arr.n    = n;
-    ctx->kv[idx].value.arr.data = GGML_CALLOC(n, gguf_type_size(type));
-    memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type));
-}
-
-void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
-    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
-    ctx->kv[idx].value.arr.n    = n;
-    ctx->kv[idx].value.arr.data = GGML_CALLOC(n, sizeof(struct gguf_str));
-    for (int i = 0; i < n; i++) {
-        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
-        str->n    = strlen(data[i]);
-        str->data = strdup(data[i]);
-    }
-}
-
-// set or add KV pairs from another context
-void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
-    for (uint32_t i = 0; i < src->header.n_kv; i++) {
-        switch (src->kv[i].type) {
-            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
-            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
-            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
-            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
-            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
-            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
-            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
-            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
-            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
-            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
-            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
-            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
-            case GGUF_TYPE_ARRAY:
-                {
-                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
-                        const char ** data = GGML_CALLOC(src->kv[i].value.arr.n, sizeof(char *));
-                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
-                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
-                        }
-                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
-                        GGML_FREE((void *)data);
-                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
-                        GGML_ABORT("nested arrays not supported");
-                    } else {
-                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
-                    }
-                } break;
-            default: GGML_ABORT("invalid type");
-        }
-    }
-}
-
-void gguf_add_tensor(
-             struct gguf_context * ctx,
-        const struct ggml_tensor * tensor) {
-    GGML_ASSERT(tensor);
-    if (gguf_find_tensor(ctx, tensor->name) != -1) {
-        GGML_ABORT("duplicated tensor name");
-    }
-
-    const int idx = ctx->header.n_tensors;
-    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));
-
-    ctx->infos[idx].name.n    = strlen(tensor->name);
-    ctx->infos[idx].name.data = strdup(tensor->name);
-
-    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
-        ctx->infos[idx].ne[i] = 1;
-    }
-
-    ctx->infos[idx].n_dims = ggml_n_dims(tensor);
-    for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
-        ctx->infos[idx].ne[i] = tensor->ne[i];
-    }
-
-    ctx->infos[idx].type   = tensor->type;
-    ctx->infos[idx].offset = 0;
-    ctx->infos[idx].data   = tensor->data;
-    ctx->infos[idx].size   = ggml_nbytes(tensor);
-
-    if (ctx->header.n_tensors > 0) {
-        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
-    }
-
-    ctx->header.n_tensors++;
-}
-
-void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
-    const int idx = gguf_find_tensor(ctx, name);
-    if (idx < 0) {
-        GGML_ABORT("tensor not found");
-    }
-
-    ctx->infos[idx].type = type;
-}
-
-void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
-    const int idx = gguf_find_tensor(ctx, name);
-    if (idx < 0) {
-        GGML_ABORT("tensor not found");
-    }
-
-    ctx->infos[idx].data = data;
-    ctx->infos[idx].size = size;
-
-    // update offsets
-    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
-        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
-    }
-}
-
-//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
-//    fwrite(&val->n,   sizeof(val->n),    1, file);
-//    fwrite(val->data, sizeof(char), val->n, file);
-//}
-//
-//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
-//    fwrite(val, sizeof(char), size, file);
-//}
-
-struct gguf_buf gguf_buf_init(size_t size) {
-    struct gguf_buf buf = {
-        /*buf.data   =*/ size == 0 ? NULL : GGML_CALLOC(1, size),
-        /*buf.size   =*/ size,
-        /*buf.offset =*/ 0,
-    };
-
-    return buf;
-}
-
-void gguf_buf_free(struct gguf_buf buf) {
-    if (buf.data) {
-        GGML_FREE(buf.data);
-    }
-}
-
-static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
-    if (buf->offset + size > buf->size) {
-        buf->size = 1.5*(buf->offset + size);
-        if (buf->data) {
-            buf->data = realloc(buf->data, buf->size);
-        }
-    }
-}
-
-static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
-    gguf_buf_grow(buf, sizeof(val->n) + val->n);
-
-    if (buf->data) {
-        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
-    }
-    buf->offset += sizeof(val->n);
-
-    if (buf->data) {
-        memcpy((char *) buf->data + buf->offset, val->data, val->n);
-    }
-    buf->offset += val->n;
-}
-
-static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
-    gguf_buf_grow(buf, el_size);
-
-    if (buf->data) {
-        memcpy((char *) buf->data + buf->offset, val, el_size);
-    }
-    buf->offset += el_size;
-}
-
-void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
-    // write header
-    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
-    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
-    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
-    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));
-
-    // write key-value pairs
-    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
-        struct gguf_kv * kv = &ctx->kv[i];
-
-        gguf_bwrite_str(buf, &kv->key);
-        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));
-
-        switch (kv->type) {
-            case GGUF_TYPE_UINT8:   gguf_bwrite_el( buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
-            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
-            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
-            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
-            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
-            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
-            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
-            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
-            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
-            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
-            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
-            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
-            case GGUF_TYPE_ARRAY:
-                {
-                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
-                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );
-
-                    switch (kv->value.arr.type) {
-                        case GGUF_TYPE_UINT8:
-                        case GGUF_TYPE_INT8:
-                        case GGUF_TYPE_UINT16:
-                        case GGUF_TYPE_INT16:
-                        case GGUF_TYPE_UINT32:
-                        case GGUF_TYPE_INT32:
-                        case GGUF_TYPE_FLOAT32:
-                        case GGUF_TYPE_UINT64:
-                        case GGUF_TYPE_INT64:
-                        case GGUF_TYPE_FLOAT64:
-                        case GGUF_TYPE_BOOL:
-                            {
-                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type));
-                            } break;
-                        case GGUF_TYPE_STRING:
-                            {
-                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
-                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
-                                }
-                            } break;
-                        case GGUF_TYPE_ARRAY:
-                        default: GGML_ABORT("invalid type");
-                    }
-                } break;
-            default: GGML_ABORT("invalid type");
-        }
-    }
-
-    // write tensor infos
-    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
-        struct gguf_tensor_info * info = &ctx->infos[i];
-
-        gguf_bwrite_str(buf, &info->name);
-        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
-        for (uint32_t j = 0; j < info->n_dims; ++j) {
-            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
-        }
-        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
-        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
-    }
-
-    // we require the data section to be aligned, so take into account any padding
-    {
-        const size_t offset     = buf->offset;
-        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);
-
-        if (offset_pad != offset) {
-            uint8_t pad = 0;
-            for (size_t i = 0; i < offset_pad - offset; ++i) {
-                gguf_bwrite_el(buf, &pad, sizeof(pad));
-            }
-        }
-    }
-
-    if (only_meta) {
-        return;
-    }
-
-    size_t offset = 0;
-
-    // write tensor data
-    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
-        struct gguf_tensor_info * info = &ctx->infos[i];
-
-        const size_t size     = info->size;
-        const size_t size_pad = GGML_PAD(size, ctx->alignment);
-
-        gguf_bwrite_el(buf, info->data, size);
-
-        if (size_pad != size) {
-            uint8_t pad = 0;
-            for (size_t j = 0; j < size_pad - size; ++j) {
-                gguf_bwrite_el(buf, &pad, sizeof(pad));
-            }
-        }
-
-        GGML_ASSERT(offset == info->offset);
-
-        offset += size_pad;
-    }
-}
-
-void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
-    FILE * file = ggml_fopen(fname, "wb");
-    if (!file) {
-        GGML_ABORT("failed to open file for writing");
-    }
-
-    struct gguf_buf buf = gguf_buf_init(16*1024);
-
-    gguf_write_to_buf(ctx, &buf, only_meta);
-
-    fwrite(buf.data, 1, buf.offset, file);
-
-    gguf_buf_free(buf);
-
-    fclose(file);
-}
-
-size_t gguf_get_meta_size(const struct gguf_context * ctx) {
-    // no allocs - only compute size
-    struct gguf_buf buf = gguf_buf_init(0);
-
-    gguf_write_to_buf(ctx, &buf, true);
-
-    return buf.offset;
-}
-
-void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
-    struct gguf_buf buf = gguf_buf_init(16*1024);
-
-    gguf_write_to_buf(ctx, &buf, true);
-
-    memcpy(data, buf.data, buf.offset);
-
-    gguf_buf_free(buf);
-}
-
 void ggml_log_set(ggml_log_callback log_callback, void * user_data) {
     g_logger_state.log_callback = log_callback ? log_callback : ggml_log_callback_default;
     g_logger_state.log_callback_user_data = user_data;
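
The C implementation removed above and the new C++ gguf.cpp added below expose the same public gguf API (gguf_init_from_file, gguf_get_n_tensors, gguf_free, ...). For orientation, a minimal metadata-only usage sketch of that API follows; it is illustrative only and not part of the patch, it assumes the public gguf.h header, and "model.gguf" is a placeholder path.

    #include <stdio.h>
    #include <stddef.h>
    #include "gguf.h"

    // Illustrative sketch (not part of the patch): open a GGUF file, read only its
    // metadata, print the tensor names, then free the context.
    int main(void) {
        struct gguf_init_params params = {
            /*no_alloc =*/ true,  // only read metadata, do not load the tensor data blob
            /*ctx      =*/ NULL,
        };

        struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
        if (gctx == NULL) {
            fprintf(stderr, "failed to load gguf file\n");
            return 1;
        }

        printf("gguf version: %d\n",  gguf_get_version(gctx));
        printf("alignment:    %zu\n", gguf_get_alignment(gctx));

        const int n_tensors = gguf_get_n_tensors(gctx);
        for (int i = 0; i < n_tensors; ++i) {
            printf("tensor %d: %s\n", i, gguf_get_tensor_name(gctx, i));
        }

        gguf_free(gctx);
        return 0;
    }
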
diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp
new file mode 100644
index 000000000..655ed600a
--- /dev/null
+++ b/ggml/src/gguf.cpp
@@ -0,0 +1,1325 @@
+#include "ggml.h"
+#include "ggml-backend.h"
+#include "ggml-impl.h"
+#include "gguf.h"
+
+#include <cinttypes>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <map>
+#include <new>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+template <typename T>
+struct type_to_gguf_type;
+
+template <>
+struct type_to_gguf_type<uint8_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT8;
+};
+
+template <>
+struct type_to_gguf_type<int8_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT8;
+};
+
+template <>
+struct type_to_gguf_type<uint16_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT16;
+};
+
+template <>
+struct type_to_gguf_type<int16_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT16;
+};
+
+template <>
+struct type_to_gguf_type<uint32_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT32;
+};
+
+template <>
+struct type_to_gguf_type<int32_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT32;
+};
+
+template <>
+struct type_to_gguf_type<float> {
+    static constexpr enum gguf_type value = GGUF_TYPE_FLOAT32;
+};
+
+template <>
+struct type_to_gguf_type<bool> {
+    static constexpr enum gguf_type value = GGUF_TYPE_BOOL;
+};
+
+template <>
+struct type_to_gguf_type<std::string> {
+    static constexpr enum gguf_type value = GGUF_TYPE_STRING;
+};
+
+template <>
+struct type_to_gguf_type<uint64_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT64;
+};
+
+template <>
+struct type_to_gguf_type<int64_t> {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT64;
+};
+
+template <>
+struct type_to_gguf_type<double> {
+    static constexpr enum gguf_type value = GGUF_TYPE_FLOAT64;
+};
+
+static const std::map<gguf_type, size_t> GGUF_TYPE_SIZE = {
+    {GGUF_TYPE_UINT8,   sizeof(uint8_t)},
+    {GGUF_TYPE_INT8,    sizeof(int8_t)},
+    {GGUF_TYPE_UINT16,  sizeof(uint16_t)},
+    {GGUF_TYPE_INT16,   sizeof(int16_t)},
+    {GGUF_TYPE_UINT32,  sizeof(uint32_t)},
+    {GGUF_TYPE_INT32,   sizeof(int32_t)},
+    {GGUF_TYPE_FLOAT32, sizeof(float)},
+    {GGUF_TYPE_BOOL,    sizeof(int8_t)},
+    {GGUF_TYPE_STRING,  0}, // undefined
+    {GGUF_TYPE_ARRAY,   0}, // undefined
+    {GGUF_TYPE_UINT64,  sizeof(uint64_t)},
+    {GGUF_TYPE_INT64,   sizeof(int64_t)},
+    {GGUF_TYPE_FLOAT64, sizeof(double)},
+};
+static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+
+static const std::map<gguf_type, const char *> GGUF_TYPE_NAME = {
+    {GGUF_TYPE_UINT8,   "u8"},
+    {GGUF_TYPE_INT8,    "i8"},
+    {GGUF_TYPE_UINT16,  "u16"},
+    {GGUF_TYPE_INT16,   "i16"},
+    {GGUF_TYPE_UINT32,  "u32"},
+    {GGUF_TYPE_INT32,   "i32"},
+    {GGUF_TYPE_FLOAT32, "f32"},
+    {GGUF_TYPE_BOOL,    "bool"},
+    {GGUF_TYPE_STRING,  "str"},
+    {GGUF_TYPE_ARRAY,   "arr"},
+    {GGUF_TYPE_UINT64,  "u64"},
+    {GGUF_TYPE_INT64,   "i64"},
+    {GGUF_TYPE_FLOAT64, "f64"},
+};
+static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+
+size_t gguf_type_size(enum gguf_type type) {
+    auto it = GGUF_TYPE_SIZE.find(type);
+    return it == GGUF_TYPE_SIZE.end() ? 0 : it->second;
+}
+
+struct gguf_kv {
+    std::string key;
+
+    bool is_array;
+    enum gguf_type type;
+
+    std::vector<int8_t>      data;
+    std::vector<std::string> data_string;
+
+    template <typename T>
+    gguf_kv(const std::string & key, const T value)
+            : key(key), is_array(false), type(type_to_gguf_type::value) {
+        GGML_ASSERT(!key.empty());
+        data.resize(sizeof(T));
+        memcpy(data.data(), &value, sizeof(T));
+    }
+
+    template <typename T>
+    gguf_kv(const std::string & key, const std::vector<T> & value)
+            : key(key), is_array(true), type(type_to_gguf_type::value) {
+        GGML_ASSERT(!key.empty());
+        data.resize(value.size()*sizeof(T));
+        for (size_t i = 0; i < value.size(); ++i) {
+            const T tmp = value[i];
+            memcpy(data.data() + i*sizeof(T), &tmp, sizeof(T));
+        }
+    }
+
+    gguf_kv(const std::string & key, const std::string & value)
+            : key(key), is_array(false), type(GGUF_TYPE_STRING) {
+        GGML_ASSERT(!key.empty());
+        data_string.push_back(value);
+    }
+
+    gguf_kv(const std::string & key, const std::vector<std::string> & value)
+            : key(key), is_array(true), type(GGUF_TYPE_STRING) {
+        GGML_ASSERT(!key.empty());
+        data_string = value;
+    }
+
+    const std::string & get_key() const {
+        return key;
+    }
+
+    const enum gguf_type & get_type() const {
+        return type;
+    }
+
+    size_t get_ne() const {
+        if (type == GGUF_TYPE_STRING) {
+            const size_t ne = data_string.size();
+            GGML_ASSERT(is_array || ne == 1);
+            return ne;
+        }
+        const size_t type_size = gguf_type_size(type);
+        GGML_ASSERT(data.size() % type_size == 0);
+        const size_t ne = data.size() / type_size;
+        GGML_ASSERT(is_array || ne == 1);
+        return ne;
+    }
+
+    template <typename T>
+    const T & get_val(const size_t i = 0) const {
+        GGML_ASSERT(type_to_gguf_type::value == type);
+        if constexpr (std::is_same<T, std::string>::value) {
+            GGML_ASSERT(data_string.size() >= i+1);
+            return data_string[i];
+        }
+        const size_t type_size = gguf_type_size(type);
+        GGML_ASSERT(data.size() % type_size == 0);
+        GGML_ASSERT(data.size() >= (i+1)*type_size);
+        return reinterpret_cast<const T *>(data.data())[i];
+    }
+
+    void cast(const enum gguf_type new_type) {
+        const size_t new_type_size = gguf_type_size(new_type);
+        GGML_ASSERT(data.size() % new_type_size == 0);
+        type = new_type;
+    }
+};
+
+struct gguf_tensor_info {
+    struct ggml_tensor t; // for holding the equivalent info
+    uint64_t offset;      // offset from start of `data`, must be a multiple of `ALIGNMENT`
+};
+
+struct gguf_context {
+    uint32_t version = GGUF_VERSION;
+
+    std::vector<struct gguf_kv>          kv;
+    std::vector<struct gguf_tensor_info> info;
+
+    size_t alignment = GGUF_DEFAULT_ALIGNMENT;
+    size_t offset    = 0; // offset of `data` from beginning of file
+    size_t size      = 0; // size of `data` in bytes
+
+    void * data = nullptr;
+};
+
+struct gguf_reader {
+    FILE * file;
+
+    gguf_reader(FILE * file) : file(file) {}
+
+    template <typename T>
+    bool read(T & dst) const {
+        return fread(&dst, 1, sizeof(dst), file) == sizeof(dst);
+    }
+
+    template <typename T>
+    bool read(std::vector<T> & dst, const size_t n) const {
+        dst.resize(n);
+        for (size_t i = 0; i < dst.size(); ++i) {
+            if constexpr (std::is_same<T, bool>::value) {
+                bool tmp;
+                if (!read(tmp)) {
+                    return false;
+                }
+                dst[i] = tmp;
+            } else {
+                if (!read(dst[i])) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    bool read(bool & dst) const {
+        int8_t tmp = -1;
+        if (!read(tmp)) {
+            return false;
+        }
+        dst = tmp != 0;
+        return true;
+    }
+
+    bool read(enum ggml_type & dst) const {
+        int32_t tmp = -1;
+        if (!read(tmp)) {
+            return false;
+        }
+        dst = ggml_type(tmp);
+        return true;
+    }
+
+    bool read(enum gguf_type & dst) const {
+        int32_t tmp = -1;
+        if (!read(tmp)) {
+            return false;
+        }
+        dst = gguf_type(tmp);
+        return true;
+    }
+
+    bool read(std::string & dst) const {
+        uint64_t size = -1;
+        if (!read(size)) {
+            return false;
+        }
+        dst.resize(size);
+        return fread(dst.data(), 1, dst.length(), file) == dst.length();
+    }
+
+    bool read(void * dst, const size_t size) const {
+        return fread(dst, 1, size, file) == size;
+    }
+};
+
+struct gguf_context * gguf_init_empty(void) {
+    return new gguf_context;
+}
+
+template <typename T>
+bool gguf_read_emplace_helper(const struct gguf_reader & gr, std::vector<struct gguf_kv> & kv, const std::string & key, const bool is_array, const size_t n) {
+    if (is_array) {
+        std::vector<T> value;
+        try {
+            if (!gr.read(value, n)) {
+                return false;
+            }
+        } catch (std::length_error &) {
+            fprintf(stderr, "%s: encountered length_error while reading value for key '%s'\n", __func__, key.c_str());
+            return false;
+        } catch (std::bad_alloc &) {
+            fprintf(stderr, "%s: encountered bad_alloc error while reading value for key '%s'\n", __func__, key.c_str());
+            return false;
+        }
+        kv.emplace_back(key, value);
+    } else {
+        T value;
+        if (!gr.read(value)) {
+            return false;
+        }
+        kv.emplace_back(key, value);
+    }
+    return true;
+}
+
+struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params) {
+    const struct gguf_reader gr(file);
+    struct gguf_context * ctx = new gguf_context;
+
+    bool ok = true;
+
+    // file magic
+    {
+        std::vector<char> magic;
+        ok = ok && gr.read(magic, 4);
+
+        if (!ok) {
+            fprintf(stderr, "%s: failed to read magic\n", __func__);
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        for (uint32_t i = 0; i < magic.size(); i++) {
+            if (magic[i] != GGUF_MAGIC[i]) {
+                fprintf(stderr, "%s: invalid magic characters: '%c%c%c%c', expected 'GGUF'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
+                gguf_free(ctx);
+                return nullptr;
+            }
+        }
+    }
+
+    // header
+    int64_t n_kv      = 0;
+    int64_t n_tensors = 0;
+
+    if (ok && gr.read(ctx->version)) {
+        if (ctx->version == 1) {
+            fprintf(stderr, "%s: GGUFv1 is no longer supported, please use a more up-to-date version\n", __func__);
+            ok = false;
+        }
+        if (ctx->version > GGUF_VERSION) {
+            fprintf(stderr, "%s: this GGUF file is version %" PRIu32 " but this software only supports up to version %d\n",
+                __func__, ctx->version, GGUF_VERSION);
+            ok = false;
+        }
+    } else {
+        ok = false;
+    }
+
+    if (ok && gr.read(n_tensors)) {
+        static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing");
+        if (n_tensors < 0 || n_tensors > int64_t(SIZE_MAX/sizeof(gguf_tensor_info))) {
+            fprintf(stderr, "%s: number of tensors is %" PRIi64 " but must be in [0, %zu]\n",
+                __func__, n_tensors, SIZE_MAX/sizeof(gguf_tensor_info));
+            ok = false;
+        }
+    } else {
+        ok = false;
+    }
+
+    if (ok && gr.read(n_kv)) {
+        static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing");
+        if (n_kv < 0 || n_kv > int64_t(SIZE_MAX/sizeof(gguf_kv))) {
+            fprintf(stderr, "%s: number of key value pairs is %" PRIi64 " but must be in [0, %zu]\n",
+                    __func__, n_kv, SIZE_MAX/sizeof(gguf_kv));
+            ok = false;
+        }
+    } else {
+        ok = false;
+    }
+
+    if (!ok) {
+        fprintf(stderr, "%s: failed to read header\n", __func__);
+        gguf_free(ctx);
+        return nullptr;
+    }
+
+    // KV pairs
+    {
+        for (int64_t i = 0; ok && i < n_kv; ++i) {
+            std::string key;
+            gguf_type   type     = gguf_type(-1);
+            bool        is_array = false;
+            uint64_t    n        = 1;
+
+            try {
+                ok = ok && gr.read(key);
+            } catch (std::length_error &) {
+                fprintf(stderr, "%s: encountered length_error while reading key %" PRIi64 "\n", __func__, i);
+                ok = false;
+            } catch (std::bad_alloc &) {
+                fprintf(stderr, "%s: encountered bad_alloc error while reading key %" PRIi64 "\n", __func__, i);
+                ok = false;
+            }
+            for (size_t j = 0; ok && j < ctx->kv.size(); ++j) {
+                if (key == ctx->kv[j].key) {
+                    fprintf(stderr, "%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i);
+                    ok = false;
+                }
+            }
+            if (!ok) {
+                break;
+            }
+
+            ok = ok && gr.read(type);
+            if (type == GGUF_TYPE_ARRAY) {
+                is_array = true;
+                ok = ok && gr.read(type);
+                ok = ok && gr.read(n);
+            }
+            if (!ok) {
+                break;
+            }
+
+            switch (type) {
+                case GGUF_TYPE_UINT8:   ok = ok && gguf_read_emplace_helper<uint8_t>    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT8:    ok = ok && gguf_read_emplace_helper<int8_t>     (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_UINT16:  ok = ok && gguf_read_emplace_helper<uint16_t>   (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT16:   ok = ok && gguf_read_emplace_helper<int16_t>    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_UINT32:  ok = ok && gguf_read_emplace_helper<uint32_t>   (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT32:   ok = ok && gguf_read_emplace_helper<int32_t>    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_FLOAT32: ok = ok && gguf_read_emplace_helper<float>      (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_BOOL:    ok = ok && gguf_read_emplace_helper<bool>       (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_STRING:  ok = ok && gguf_read_emplace_helper<std::string>(gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_UINT64:  ok = ok && gguf_read_emplace_helper<uint64_t>   (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT64:   ok = ok && gguf_read_emplace_helper<int64_t>    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_FLOAT64: ok = ok && gguf_read_emplace_helper<double>     (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_ARRAY:
+                default:
+                    {
+                        fprintf(stderr, "%s: key '%s' has invalid GGUF type %d\n", __func__, key.c_str(), type);
+                        ok = false;
+                    } break;
+            }
+        }
+
+        if (!ok) {
+            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
+            gguf_free(ctx);
+            return nullptr;
+        }
+        GGML_ASSERT(int64_t(ctx->kv.size()) == n_kv);
+
+        const int alignment_idx = gguf_find_key(ctx, GGUF_KEY_GENERAL_ALIGNMENT);
+        ctx->alignment = alignment_idx == -1 ? GGUF_DEFAULT_ALIGNMENT : gguf_get_val_u32(ctx, alignment_idx);
+
+        if (ctx->alignment == 0 || (ctx->alignment & (ctx->alignment - 1)) != 0) {
+            fprintf(stderr, "%s: alignment %zu is not a power of 2\n", __func__, ctx->alignment);
+            gguf_free(ctx);
+            return nullptr;
+        }
+    }
+
+    // read the tensor info
+    for (int64_t i = 0; ok && i < n_tensors; ++i) {
+        struct gguf_tensor_info info;
+
+        // tensor name
+        {
+            std::string name;
+            try {
+                ok = ok && gr.read(name);
+            } catch (std::length_error &) {
+                fprintf(stderr, "%s: encountered length_error while reading tensor name %" PRIi64 "\n", __func__, i);
+                ok = false;
+            } catch (std::bad_alloc &) {
+                fprintf(stderr, "%s: encountered bad_alloc error while reading tensor name %" PRIi64 "\n", __func__, i);
+                ok = false;
+            }
+            if (name.length() >= GGML_MAX_NAME) {
+                fprintf(stderr, "%s: tensor name %" PRIi64 " is too long: %zu >= %d\n", __func__, i, name.length(), GGML_MAX_NAME);
+                ok = false;
+                break;
+            }
+            ggml_set_name(&info.t, name.c_str());
+
+            // make sure there are no duplicate tensor names
+            for (int64_t j = 0; ok && j < i; ++j) {
+                if (strcmp(info.t.name, ctx->info[j].t.name) == 0) {
+                    fprintf(stderr, "%s: duplicate tensor name '%s' for tensors %" PRIi64 " and %" PRIi64 "\n", __func__, info.t.name, j, i);
+                    ok = false;
+                    break;
+                }
+            }
+        }
+        if (!ok) {
+            break;
+        }
+
+        // tensor shape
+        {
+            uint32_t n_dims = -1;
+            ok = ok && gr.read(n_dims);
+            if (n_dims > GGML_MAX_DIMS) {
+                fprintf(stderr, "%s: tensor '%s' has invalid number of dimensions: %" PRIu32 " > %" PRIu32 "\n",
+                    __func__, info.t.name, n_dims, GGML_MAX_DIMS);
+                ok = false;
+                break;
+            }
+            for (uint32_t j = 0; ok && j < GGML_MAX_DIMS; ++j) {
+                info.t.ne[j] = 1;
+                if (j < n_dims) {
+                    ok = ok && gr.read(info.t.ne[j]);
+                }
+
+                // check that all ne are non-negative
+                if (info.t.ne[j] < 0) {
+                    fprintf(stderr, "%s: tensor '%s' dimension %" PRIu32 " has invalid number of elements: %" PRIi64 " < 0\n",
+                        __func__, info.t.name, j, info.t.ne[j]);
+                    ok = false;
+                    break;
+                }
+            }
+
+            // check that the total number of elements is representable
+            if (ok && ((INT64_MAX/info.t.ne[1] <= info.t.ne[0]) ||
+                       (INT64_MAX/info.t.ne[2] <= info.t.ne[0]*info.t.ne[1]) ||
+                       (INT64_MAX/info.t.ne[3] <= info.t.ne[0]*info.t.ne[1]*info.t.ne[2]))) {
+
+                fprintf(stderr, "%s: total number of elements in tensor '%s' with shape "
+                    "(%" PRIi64 ", %" PRIi64 ", %" PRIi64 ", %" PRIi64 ") is >= %" PRIi64 "\n",
+                    __func__, info.t.name, info.t.ne[0], info.t.ne[1], info.t.ne[2], info.t.ne[3], INT64_MAX);
+                ok = false;
+                break;
+            }
+        }
+        if (!ok) {
+            break;
+        }
+
+        // tensor type
+        {
+            ok = ok && gr.read(info.t.type);
+
+            // check that tensor type is within defined range
+            if (info.t.type < 0 || info.t.type >= GGML_TYPE_COUNT) {
+                fprintf(stderr, "%s: tensor '%s' has invalid ggml type %d (%s)\n",
+                    __func__, info.t.name, info.t.type, ggml_type_name(info.t.type));
+                ok = false;
+                break;
+            }
+            const size_t  type_size = ggml_type_size(info.t.type);
+            const int64_t blck_size = ggml_blck_size(info.t.type);
+
+            // check that row size is divisible by block size
+            if (blck_size == 0 || info.t.ne[0] % blck_size != 0) {
+                fprintf(stderr, "%s: tensor '%s' of type %d (%s) has %" PRId64 " elements per row, "
+                    "not a multiple of block size (%" PRId64 ")\n",
+                    __func__, info.t.name, (int) info.t.type, ggml_type_name(info.t.type), info.t.ne[0], blck_size);
+                ok = false;
+                break;
+            }
+
+            // calculate byte offsets given the tensor shape and type
+            info.t.nb[0] = type_size;
+            info.t.nb[1] = info.t.nb[0]*(info.t.ne[0]/blck_size);
+            for (int j = 2; j < GGML_MAX_DIMS; ++j) {
+                info.t.nb[j] = info.t.nb[j - 1]*info.t.ne[j - 1];
+            }
+        }
+        if (!ok) {
+            break;
+        }
+
+        // tensor data offset within buffer
+        ok = ok && gr.read(info.offset);
+
+        ctx->info.push_back(info);
+    }
+
+    if (!ok) {
+        fprintf(stderr, "%s: failed to read tensor info\n", __func__);
+        gguf_free(ctx);
+        return nullptr;
+    }
+    GGML_ASSERT(int64_t(ctx->info.size()) == n_tensors);
+
+    // we require the data section to be aligned, so take into account any padding
+    if (fseek(file, GGML_PAD(ftell(file), ctx->alignment), SEEK_SET) != 0) {
+        fprintf(stderr, "%s: failed to seek to beginning of data section\n", __func__);
+        gguf_free(ctx);
+        return nullptr;
+    }
+
+    // store the current file offset - this is where the data section starts
+    ctx->offset = ftell(file);
+
+    // compute the total size of the data section, taking into account the alignment
+    {
+        ctx->size = 0;
+        for (size_t i = 0; i < ctx->info.size(); ++i) {
+            const gguf_tensor_info & ti = ctx->info[i];
+            if (ti.offset != ctx->size) {
+                fprintf(stderr, "%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n",
+                    __func__, ti.t.name, ti.offset, ctx->size);
+                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
+                gguf_free(ctx);
+                return nullptr;
+            }
+            ctx->size += GGML_PAD(ggml_nbytes(&ti.t), ctx->alignment);
+        }
+    }
+
+    // load the tensor data only if requested
+    if (params.ctx != nullptr) {
+        // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
+        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
+        //   the ggml_tensor structs to the appropriate locations in the binary blob
+
+        // compute the exact size needed for the new ggml_context
+        const size_t mem_size =
+            params.no_alloc ?
+            (n_tensors    )*ggml_tensor_overhead() :
+            (n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
+
+        struct ggml_init_params pdata = {
+            /*mem_size   =*/ mem_size,
+            /*mem_buffer =*/ nullptr,
+            /*no_alloc   =*/ params.no_alloc,
+        };
+
+        *params.ctx = ggml_init(pdata);
+        if (*params.ctx == nullptr) {
+            fprintf(stderr, "%s: failed to initialize ggml context for storing tensors\n", __func__);
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        struct ggml_context * ctx_data = *params.ctx;
+
+        struct ggml_tensor * data = nullptr;
+
+        if (!params.no_alloc) {
+            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
+
+            ok = ok && data != nullptr;
+
+            // read the binary blob with the tensor data
+            ok = ok && gr.read(data->data, ctx->size);
+
+            if (!ok) {
+                fprintf(stderr, "%s: failed to read tensor data binary blob\n", __func__);
+                ggml_free(ctx_data);
+                *params.ctx = nullptr;
+                gguf_free(ctx);
+                return nullptr;
+            }
+
+            ctx->data = data->data;
+        }
+
+        ggml_set_no_alloc(ctx_data, true);
+
+        // create the tensors
+        for (size_t i = 0; i < ctx->info.size(); ++i) {
+            const struct gguf_tensor_info & info = ctx->info[i];
+
+            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, info.t.type, GGML_MAX_DIMS, info.t.ne);
+
+            ok = ok && cur != nullptr;
+
+            if (!ok) {
+                break;
+            }
+
+            ggml_set_name(cur, info.t.name);
+
+            // point the data member to the appropriate location in the binary blob using the tensor info
+            if (!params.no_alloc) {
+                cur->data = (char *) data->data + info.offset;
+            }
+        }
+
+        if (!ok) {
+            fprintf(stderr, "%s: failed to create tensors\n", __func__);
+            ggml_free(ctx_data);
+            *params.ctx = nullptr;
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        ggml_set_no_alloc(ctx_data, params.no_alloc);
+    }
+
+    return ctx;
+}
+
+struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
+    FILE * file = ggml_fopen(fname, "rb");
+
+    if (!file) {
+        fprintf(stderr, "%s: failed to open GGUF file '%s'\n", __func__, fname);
+        return nullptr;
+    }
+
+    struct gguf_context * result = gguf_init_from_file_impl(file, params);
+    fclose(file);
+    return result;
+}
+
+void gguf_free(struct gguf_context * ctx) {
+    if (ctx == nullptr) {
+        return;
+    }
+    delete ctx;
+}
+
+const char * gguf_type_name(enum gguf_type type) {
+    auto it = GGUF_TYPE_NAME.find(type);
+    return it == GGUF_TYPE_NAME.end() ? nullptr : it->second;
+}
+
+uint32_t gguf_get_version(const struct gguf_context * ctx) {
+    return ctx->version;
+}
+
+size_t gguf_get_alignment(const struct gguf_context * ctx) {
+    return ctx->alignment;
+}
+
+size_t gguf_get_data_offset(const struct gguf_context * ctx) {
+    return ctx->offset;
+}
+
+int64_t gguf_get_n_kv(const struct gguf_context * ctx) {
+    return ctx->kv.size();
+}
+
+int64_t gguf_find_key(const struct gguf_context * ctx, const char * key) {
+    // return -1 if key not found
+    int64_t keyfound = -1;
+
+    const int64_t n_kv = gguf_get_n_kv(ctx);
+
+    for (int64_t i = 0; i < n_kv; ++i) {
+        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
+            keyfound = i;
+            break;
+        }
+    }
+
+    return keyfound;
+}
+
+const char * gguf_get_key(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    return ctx->kv[key_id].get_key().c_str();
+}
+
+enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    return ctx->kv[key_id].is_array ? GGUF_TYPE_ARRAY : ctx->kv[key_id].get_type();
+}
+
+enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].is_array);
+    return ctx->kv[key_id].get_type();
+}
+
+const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING);
+    return ctx->kv[key_id].data.data();
+}
+
+const char * gguf_get_arr_str(const struct gguf_context * ctx, int64_t key_id, size_t i) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_type() == GGUF_TYPE_STRING);
+    return ctx->kv[key_id].data_string[i].c_str();
+}
+
+size_t gguf_get_arr_n(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+
+    if (ctx->kv[key_id].type == GGUF_TYPE_STRING) {
+        return ctx->kv[key_id].data_string.size();
+    }
+
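+    // for non-string arrays, the element count is the raw byte size divided by the element size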
+    const size_t type_size = gguf_type_size(ctx->kv[key_id].type);
+    GGML_ASSERT(ctx->kv[key_id].data.size() % type_size == 0);
+    return ctx->kv[key_id].data.size() / type_size;
+}
+
+uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<uint8_t>();
+}
+
+int8_t gguf_get_val_i8(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<int8_t>();
+}
+
+uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<uint16_t>();
+}
+
+int16_t gguf_get_val_i16(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<int16_t>();
+}
+
+uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<uint32_t>();
+}
+
+int32_t gguf_get_val_i32(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<int32_t>();
+}
+
+float gguf_get_val_f32(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<float>();
+}
+
+uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<uint64_t>();
+}
+
+int64_t gguf_get_val_i64(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<int64_t>();
+}
+
+double gguf_get_val_f64(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<double>();
+}
+
+bool gguf_get_val_bool(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<bool>();
+}
+
+const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val<std::string>().c_str();
+}
+
+const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING);
+    return ctx->kv[key_id].data.data();
+}
+
+int64_t gguf_get_n_tensors(const struct gguf_context * ctx) {
+    return ctx->info.size();
+}
+
+int64_t gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
+    // return -1 if tensor not found
+    int64_t tensor_id = -1;
+
+    const int64_t n_tensors = gguf_get_n_tensors(ctx);
+
+    for (int64_t i = 0; i < n_tensors; ++i) {
+        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
+            tensor_id = i;
+            break;
+        }
+    }
+
+    return tensor_id;
+}
+
+size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ctx->info[tensor_id].offset;
+}
+
+const char * gguf_get_tensor_name(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ctx->info[tensor_id].t.name;
+}
+
+enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ctx->info[tensor_id].t.type;
+}
+
+size_t gguf_get_tensor_size(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ggml_nbytes(&ctx->info[tensor_id].t);
+}
+
+int64_t gguf_remove_key(struct gguf_context * ctx, const char * key) {
+    const int64_t key_id = gguf_find_key(ctx, key);
+    if (key_id >= 0) {
+        ctx->kv.erase(ctx->kv.begin() + key_id);
+    }
+    return key_id;
+}
+
+template <typename T>
+static void gguf_check_reserved_keys(const std::string & key, const T val) {
+    if (key == GGUF_KEY_GENERAL_ALIGNMENT) {
+        if constexpr (std::is_same<T, uint32_t>::value) {
+            GGML_ASSERT(val > 0 && (val & (val - 1)) == 0 && GGUF_KEY_GENERAL_ALIGNMENT " must be power of 2");
+        } else {
+            GGML_ABORT(GGUF_KEY_GENERAL_ALIGNMENT " must be type u32");
+        }
+    }
+}
+
+void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, std::string(val));
+}
+
+void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, size_t n) {
+    gguf_check_reserved_keys(key, data);
+    gguf_remove_key(ctx, key);
+
+    const size_t nbytes = n*gguf_type_size(type);
+    std::vector<int8_t> tmp(nbytes);
+    if (!tmp.empty()) {
+        memcpy(tmp.data(), data, nbytes);
+    }
+    ctx->kv.emplace_back(key, tmp);
+    ctx->kv.back().cast(type);
+}
+
+void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, size_t n) {
+    gguf_check_reserved_keys(key, data);
+    gguf_remove_key(ctx, key);
+
+    std::vector<std::string> tmp(n);
+    for (size_t i = 0; i < n; ++i) {
+        tmp[i] = data[i];
+    }
+    ctx->kv.emplace_back(key, tmp);
+}
+
+// set or add KV pairs from another context
+void gguf_set_kv(struct gguf_context * ctx, const struct gguf_context * src) {
+    const int64_t n_kv = gguf_get_n_kv(src);
+    for (int64_t i = 0; i < n_kv; ++i) {
+        const struct gguf_kv & kv = src->kv[i];
+
+        if (!kv.is_array) {
+            switch (kv.get_type()) {
+                case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, kv.get_key().c_str(), kv.get_val<uint8_t>());             break;
+                case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, kv.get_key().c_str(), kv.get_val<int8_t>());              break;
+                case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, kv.get_key().c_str(), kv.get_val<uint16_t>());            break;
+                case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, kv.get_key().c_str(), kv.get_val<int16_t>());             break;
+                case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, kv.get_key().c_str(), kv.get_val<uint32_t>());            break;
+                case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, kv.get_key().c_str(), kv.get_val<int32_t>());             break;
+                case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, kv.get_key().c_str(), kv.get_val<float>());               break;
+                case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, kv.get_key().c_str(), kv.get_val<uint64_t>());            break;
+                case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, kv.get_key().c_str(), kv.get_val<int64_t>());             break;
+                case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, kv.get_key().c_str(), kv.get_val<double>());              break;
+                case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, kv.get_key().c_str(), kv.get_val<bool>());                break;
+                case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, kv.get_key().c_str(), kv.get_val<std::string>().c_str()); break;
+                case GGUF_TYPE_ARRAY:
+                default: GGML_ABORT("invalid type");
+            }
+            continue;
+        }
+
+        const size_t ne = kv.get_ne();
+
+        switch (kv.get_type()) {
+            case GGUF_TYPE_UINT8:
+            case GGUF_TYPE_INT8:
+            case GGUF_TYPE_UINT16:
+            case GGUF_TYPE_INT16:
+            case GGUF_TYPE_UINT32:
+            case GGUF_TYPE_INT32:
+            case GGUF_TYPE_FLOAT32:
+            case GGUF_TYPE_UINT64:
+            case GGUF_TYPE_INT64:
+            case GGUF_TYPE_FLOAT64:
+            case GGUF_TYPE_BOOL: {
+                gguf_set_arr_data(ctx, kv.get_key().c_str(), kv.get_type(), kv.data.data(), ne);
+            } break;
+            case GGUF_TYPE_STRING: {
+                std::vector<const char *> tmp(ne);
+                for (size_t j = 0; j < ne; ++j) {
+                    tmp[j] = kv.data_string[j].c_str();
+                }
+                gguf_set_arr_str(ctx, kv.get_key().c_str(), tmp.data(), ne);
+            } break;
+            case GGUF_TYPE_ARRAY:
+            default: GGML_ABORT("invalid type");
+        }
+    }
+}
+
+void gguf_add_tensor(
+             struct gguf_context * ctx,
+        const struct ggml_tensor * tensor) {
+    GGML_ASSERT(tensor);
+    if (gguf_find_tensor(ctx, tensor->name) != -1) {
+        GGML_ABORT("duplicate tensor name: %s", tensor->name);
+    }
+
+    struct gguf_tensor_info ti;
+    ti.t = *tensor;
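+    // the new tensor's data is placed directly after the previous tensor's data, padded to the context alignment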
+    ti.offset = ctx->info.empty() ? 0 :
+        ctx->info.back().offset + GGML_PAD(ggml_nbytes(&ctx->info.back().t), ctx->alignment);
+    ctx->info.push_back(ti);
+}
+
+void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
+    const int64_t tensor_id = gguf_find_tensor(ctx, name);
+    if (tensor_id < 0) {
+        GGML_ABORT("tensor not found: %s", name);
+    }
+    struct ggml_tensor * tensor = &ctx->info[tensor_id].t;
+    const size_t  type_size = ggml_type_size(type);
+    const int64_t blck_size = ggml_blck_size(type);
+
+    tensor->type = type;
+    GGML_ASSERT(tensor->ne[0] % blck_size == 0 && "tensor row size not divisible by block size of new type");
+
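+    // recompute the byte strides (nb) for the new type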
+    tensor->nb[0] = type_size;
+    tensor->nb[1] = tensor->nb[0]*(tensor->ne[0]/blck_size);
+    for (int i = 2; i < GGML_MAX_DIMS; i++) {
+        tensor->nb[i] = tensor->nb[i - 1]*tensor->ne[i - 1];
+    }
+
+    // update offsets
+    const int64_t n_tensors = gguf_get_n_tensors(ctx);
+    for (int64_t i = tensor_id + 1; i < n_tensors; ++i) {
+        ctx->info[i].offset = ctx->info[i - 1].offset + GGML_PAD(ggml_nbytes(&ctx->info[i - 1].t), ctx->alignment);
+    }
+}
+
+void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data) {
+    const int64_t tensor_id = gguf_find_tensor(ctx, name);
+    if (tensor_id < 0) {
+        GGML_ABORT("tensor not found: %s", name);
+    }
+
+    ctx->info[tensor_id].t.data = (void *)(uintptr_t)data; // double cast suppresses warning about casting away const
+}
+
+struct gguf_writer {
+    std::vector<int8_t> & buf;
+
+    gguf_writer(std::vector<int8_t> & buf) : buf(buf) {}
+
+    template <typename T>
+    void write(const T & val) const {
+        for (size_t i = 0; i < sizeof(val); ++i) {
+            buf.push_back(reinterpret_cast<const int8_t *>(&val)[i]);
+        }
+    }
+
+    void write(const std::vector<int8_t> & val) const {
+        buf.insert(buf.end(), val.begin(), val.end());
+    }
+
+    void write(const bool & val) const {
+        const int8_t val8 = val ? 1 : 0;
+        write(val8);
+    }
+
+    void write(const std::string & val) const {
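+        // strings are serialized as a 64-bit length followed by the raw bytes (no terminating null)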
+        {
+            const uint64_t n = val.length();
+            write(n);
+        }
+        for (size_t i = 0; i < val.length(); ++i) {
+            buf.push_back(reinterpret_cast<const int8_t *>(val.data())[i]);
+        }
+    }
+
+    void write(const char * val) const {
+        write(std::string(val));
+    }
+
+    void write(const enum ggml_type & val) const {
+        write(int32_t(val));
+    }
+
+    void write(const enum gguf_type & val) const {
+        write(int32_t(val));
+    }
+
+    void write(const struct gguf_kv & kv) const {
+        const uint64_t ne = kv.get_ne();
+
+        write(kv.get_key());
+
+        if (kv.is_array) {
+            write(GGUF_TYPE_ARRAY);
+            write(kv.get_type());
+            write(ne);
+        } else {
+            write(kv.get_type());
+        }
+
+        switch (kv.get_type()) {
+            case GGUF_TYPE_UINT8:
+            case GGUF_TYPE_INT8:
+            case GGUF_TYPE_UINT16:
+            case GGUF_TYPE_INT16:
+            case GGUF_TYPE_UINT32:
+            case GGUF_TYPE_INT32:
+            case GGUF_TYPE_FLOAT32:
+            case GGUF_TYPE_UINT64:
+            case GGUF_TYPE_INT64:
+            case GGUF_TYPE_FLOAT64: {
+                write(kv.data);
+            } break;
+            case GGUF_TYPE_BOOL: {
+                for (size_t i = 0; i < ne; ++i) {
+                    write(kv.get_val<bool>(i));
+                }
+            } break;
+            case GGUF_TYPE_STRING: {
+                for (size_t i = 0; i < ne; ++i) {
+                    write(kv.get_val<std::string>(i));
+                }
+            } break;
+            case GGUF_TYPE_ARRAY:
+            default: GGML_ABORT("invalid type");
+        }
+    }
+
+    void write_tensor_meta(const struct gguf_tensor_info & info) const {
+        write(info.t.name);
+
+        const uint32_t n_dims = ggml_n_dims(&info.t);
+        write(n_dims);
+
+        for (uint32_t j = 0; j < n_dims; ++j) {
+            write(info.t.ne[j]);
+        }
+        write(info.t.type);
+        write(info.offset);
+    }
+
+    void pad(const size_t alignment) const {
+        while (buf.size() % alignment != 0) {
+            const int8_t zero = 0;
+            write(zero);
+        }
+    }
+
+    void write_tensor_data(const struct gguf_tensor_info & info, const size_t offset_data, const size_t alignment) const {
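+        // the tensor data must start exactly at the offset recorded in its metadata, relative to the start of the data section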
+        GGML_ASSERT(buf.size() - offset_data == info.offset);
+
+        GGML_ASSERT(ggml_is_contiguous(&info.t));
+        const size_t offset = buf.size();
+        const size_t nbytes = ggml_nbytes(&info.t);
+
+        buf.resize(offset + nbytes);
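+        // tensors backed by a backend buffer are copied out via the backend API, host tensors via plain memcpy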
+        if (info.t.buffer) {
+            ggml_backend_tensor_get(&info.t, buf.data() + offset, 0, nbytes);
+        } else {
+            GGML_ASSERT(info.t.data);
+            memcpy(buf.data() + offset, info.t.data, nbytes);
+        }
+
+        pad(alignment);
+    }
+};
+
+void gguf_write_to_buf(const struct gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta) {
+    const struct gguf_writer gw(buf);
+
+    const int64_t n_kv      = gguf_get_n_kv(ctx);
+    const int64_t n_tensors = gguf_get_n_tensors(ctx);
+
+    // write header
+    gw.write(GGUF_MAGIC[0]);
+    gw.write(GGUF_MAGIC[1]);
+    gw.write(GGUF_MAGIC[2]);
+    gw.write(GGUF_MAGIC[3]);
+    gw.write(ctx->version);
+    gw.write(n_tensors);
+    gw.write(n_kv);
+
+    // write key-value pairs
+    for (int64_t i = 0; i < n_kv; ++i) {
+        gw.write(ctx->kv[i]);
+    }
+
+    // write tensor info
+    for (int64_t i = 0; i < n_tensors; ++i) {
+        gw.write_tensor_meta(ctx->info[i]);
+    }
+
+    // we require the data section to be aligned
+    gw.pad(ctx->alignment);
+
+    if (only_meta) {
+        return;
+    }
+
+    const size_t offset_data = gw.buf.size();
+
+    // write tensor data
+    for (int64_t i = 0; i < n_tensors; ++i) {
+        gw.write_tensor_data(ctx->info[i], offset_data, ctx->alignment);
+    }
+}
+
+bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
+    FILE * file = ggml_fopen(fname, "wb");
+
+    if (!file) {
+        fprintf(stderr, "%s: failed to open file '%s' for writing GGUF data\n", __func__, fname);
+        return false;
+    }
+
+    std::vector<int8_t> buf;
+    gguf_write_to_buf(ctx, buf, only_meta);
+    const bool ok = fwrite(buf.data(), 1, buf.size(), file) == buf.size();
+    fclose(file);
+    return ok;
+}
+
+size_t gguf_get_meta_size(const struct gguf_context * ctx) {
+    // only return size
+    std::vector<int8_t> buf;
+    gguf_write_to_buf(ctx, buf, /*only_meta =*/ true);
+    return buf.size();
+}
+
+void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
+    std::vector<int8_t> buf;
+    gguf_write_to_buf(ctx, buf, /*only_meta =*/ true);
+    memcpy(data, buf.data(), buf.size());
+}
diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp
index a05ba4f63..6ec709dd3 100644
--- a/src/llama-impl.cpp
+++ b/src/llama-impl.cpp
@@ -1,5 +1,6 @@
 #include "llama-impl.h"
 
+#include "gguf.h"
 #include "llama.h"
 
 #include 
@@ -138,7 +139,7 @@ std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
             {
                 const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                 int arr_n = gguf_get_arr_n(ctx_gguf, i);
-                const void * data = gguf_get_arr_data(ctx_gguf, i);
+                const void * data = arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx_gguf, i);
                 std::stringstream ss;
                 ss << "[";
                 for (int j = 0; j < arr_n; j++) {
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 7743b4652..1c4e30878 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -18,7 +18,7 @@ const char * llama_file_version_name(llama_fver version) {
 }
 
 namespace GGUFMeta {
-    template 
+    template 
     struct GKV_Base_Type {
         static constexpr gguf_type gt = gt_;
 
@@ -60,10 +60,11 @@ namespace GGUFMeta {
         public:
         static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
         static ArrayInfo getter(const gguf_context *ctx, const int k) {
+            const enum gguf_type arr_type = gguf_get_arr_type(ctx, k);
             return ArrayInfo {
-                gguf_get_arr_type(ctx, k),
+                arr_type,
                 size_t(gguf_get_arr_n(ctx, k)),
-                gguf_get_arr_data(ctx, k),
+                arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx, k),
             };
         }
     };
@@ -553,7 +554,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,
             const enum gguf_type type   = gguf_get_kv_type(meta.get(), i);
             const std::string type_name =
                 type == GGUF_TYPE_ARRAY
-                ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
+                ? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
                 : gguf_type_name(type);
 
             std::string value          = gguf_kv_to_str(meta.get(), i);
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 104f90343..038cf58dd 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -875,7 +875,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
 
         // update the gguf meta data as we go
         gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
-        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data, new_size);
+        GGML_ASSERT(gguf_get_tensor_size(ctx_outs[cur_split].get(), gguf_find_tensor(ctx_outs[cur_split].get(), name.c_str())) == new_size);
+        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data);
 
         // write tensor data + padding
         fout.write((const char *) new_data, new_size);
diff --git a/tests/test-gguf.cpp b/tests/test-gguf.cpp
index 1bb5fb47c..611957ac0 100644
--- a/tests/test-gguf.cpp
+++ b/tests/test-gguf.cpp
@@ -15,66 +15,71 @@ constexpr int offset_has_tensors = 2000;
 constexpr int offset_has_data    = 3000;
 
 enum handcrafted_file_type {
-    HANDCRAFTED_HEADER_BAD_MAGIC          =  10,
-    HANDCRAFTED_HEADER_BAD_VERSION_1      =  20,
-    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE =  30,
-    HANDCRAFTED_HEADER_BAD_N_TENSORS      =  40,
-    HANDCRAFTED_HEADER_BAD_N_KV           =  50,
-    HANDCRAFTED_HEADER_EMPTY              = 800,
+    HANDCRAFTED_HEADER_BAD_MAGIC           =  10,
+    HANDCRAFTED_HEADER_BAD_VERSION_1       =  20,
+    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE  =  30,
+    HANDCRAFTED_HEADER_BAD_N_TENSORS       =  40,
+    HANDCRAFTED_HEADER_BAD_N_KV            =  50,
+    HANDCRAFTED_HEADER_EMPTY               = 800,
 
-    HANDCRAFTED_KV_BAD_KEY_SIZE           =  10 + offset_has_kv,
-    HANDCRAFTED_KV_BAD_TYPE               =  20 + offset_has_kv,
-    HANDCRAFTED_KV_BAD_VALUE_SIZE         =  30 + offset_has_kv,
-    HANDCRAFTED_KV_DUPLICATE_KEY          =  40 + offset_has_kv,
-    HANDCRAFTED_KV_SUCCESS                = 800 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_KEY_SIZE            =  10 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_TYPE                =  20 + offset_has_kv,
+    // HANDCRAFTED_KV_BAD_VALUE_SIZE          =  30 + offset_has_kv, // removed because it can result in allocations > 1 TB (default sanitizer limit)
+    HANDCRAFTED_KV_DUPLICATE_KEY           =  40 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_ALIGN               =  50 + offset_has_kv,
+    HANDCRAFTED_KV_SUCCESS                 = 800 + offset_has_kv,
 
-    HANDCRAFTED_TENSORS_BAD_NAME_SIZE     =  10 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_N_DIMS        =  20 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_SHAPE         =  30 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_NE_TOO_BIG        =  40 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_TYPE          =  50 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_OFFSET        =  60 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_DUPLICATE_NAME    =  70 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_ALIGNMENT     =  80 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_SUCCESS           = 800 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_CUSTOM_ALIGN      = 810 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_NAME_SIZE      =  10 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_N_DIMS         =  20 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_SHAPE          =  30 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_NE_TOO_BIG         =  40 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_TYPE           =  50 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_OFFSET         =  60 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_DUPLICATE_NAME     =  70 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_ALIGN          =  75 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN =  80 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_SUCCESS            = 800 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_CUSTOM_ALIGN       = 810 + offset_has_tensors,
 
-    HANDCRAFTED_DATA_NOT_ENOUGH_DATA      =  10 + offset_has_data,
-    HANDCRAFTED_DATA_BAD_ALIGNMENT        =  20 + offset_has_data,
-    HANDCRAFTED_DATA_SUCCESS              = 800 + offset_has_data,
-    HANDCRAFTED_DATA_CUSTOM_ALIGN         = 810 + offset_has_data,
+    HANDCRAFTED_DATA_NOT_ENOUGH_DATA       =  10 + offset_has_data,
+    HANDCRAFTED_DATA_BAD_ALIGN             =  15 + offset_has_data,
+    HANDCRAFTED_DATA_INCONSISTENT_ALIGN    =  20 + offset_has_data,
+    HANDCRAFTED_DATA_SUCCESS               = 800 + offset_has_data,
+    HANDCRAFTED_DATA_CUSTOM_ALIGN          = 810 + offset_has_data,
 };
 
 std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
     switch (hft) {
-        case HANDCRAFTED_HEADER_BAD_MAGIC:          return "HEADER_BAD_MAGIC";
-        case HANDCRAFTED_HEADER_BAD_VERSION_1:      return "HEADER_BAD_VERSION_1";
-        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE";
-        case HANDCRAFTED_HEADER_BAD_N_KV:           return "HEADER_BAD_N_KV";
-        case HANDCRAFTED_HEADER_BAD_N_TENSORS:      return "HEADER_BAD_N_TENSORS";
-        case HANDCRAFTED_HEADER_EMPTY:              return "HEADER_EMPTY";
+        case HANDCRAFTED_HEADER_BAD_MAGIC:           return "HEADER_BAD_MAGIC";
+        case HANDCRAFTED_HEADER_BAD_VERSION_1:       return "HEADER_BAD_VERSION_1";
+        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE:  return "HEADER_BAD_VERSION_FUTURE";
+        case HANDCRAFTED_HEADER_BAD_N_KV:            return "HEADER_BAD_N_KV";
+        case HANDCRAFTED_HEADER_BAD_N_TENSORS:       return "HEADER_BAD_N_TENSORS";
+        case HANDCRAFTED_HEADER_EMPTY:               return "HEADER_EMPTY";
 
-        case HANDCRAFTED_KV_BAD_KEY_SIZE:           return "KV_BAD_KEY_SIZE";
-        case HANDCRAFTED_KV_BAD_TYPE:               return "KV_BAD_TYPE";
-        case HANDCRAFTED_KV_BAD_VALUE_SIZE:         return "KV_BAD_VALUE_SIZE";
-        case HANDCRAFTED_KV_DUPLICATE_KEY:          return "KV_DUPLICATE_KEY";
-        case HANDCRAFTED_KV_SUCCESS:                return "KV_RANDOM_KV";
+        case HANDCRAFTED_KV_BAD_KEY_SIZE:            return "KV_BAD_KEY_SIZE";
+        case HANDCRAFTED_KV_BAD_TYPE:                return "KV_BAD_TYPE";
+        case HANDCRAFTED_KV_DUPLICATE_KEY:           return "KV_DUPLICATE_KEY";
+        case HANDCRAFTED_KV_BAD_ALIGN:               return "KV_BAD_ALIGN";
+        case HANDCRAFTED_KV_SUCCESS:                 return "KV_RANDOM_KV";
 
-        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:     return "TENSORS_BAD_NAME_SIZE";
-        case HANDCRAFTED_TENSORS_BAD_N_DIMS:        return "TENSORS_BAD_N_DIMS";
-        case HANDCRAFTED_TENSORS_BAD_SHAPE:         return "TENSORS_BAD_SHAPE";
-        case HANDCRAFTED_TENSORS_NE_TOO_BIG:        return "TENSORS_NE_TOO_BIG";
-        case HANDCRAFTED_TENSORS_BAD_TYPE:          return "TENSORS_BAD_TYPE";
-        case HANDCRAFTED_TENSORS_BAD_OFFSET:        return "TENSORS_BAD_OFFSET";
-        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:    return "TENSORS_DUPLICATE_NAME";
-        case HANDCRAFTED_TENSORS_BAD_ALIGNMENT:     return "TENSORS_BAD_ALIGNMENT";
-        case HANDCRAFTED_TENSORS_SUCCESS:           return "TENSORS_SUCCESS";
-        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:      return "TENSORS_CUSTOM_ALIGN";
+        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:      return "TENSORS_BAD_NAME_SIZE";
+        case HANDCRAFTED_TENSORS_BAD_N_DIMS:         return "TENSORS_BAD_N_DIMS";
+        case HANDCRAFTED_TENSORS_BAD_SHAPE:          return "TENSORS_BAD_SHAPE";
+        case HANDCRAFTED_TENSORS_NE_TOO_BIG:         return "TENSORS_NE_TOO_BIG";
+        case HANDCRAFTED_TENSORS_BAD_TYPE:           return "TENSORS_BAD_TYPE";
+        case HANDCRAFTED_TENSORS_BAD_OFFSET:         return "TENSORS_BAD_OFFSET";
+        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:     return "TENSORS_DUPLICATE_NAME";
+        case HANDCRAFTED_TENSORS_BAD_ALIGN:          return "TENSORS_BAD_ALIGN";
+        case HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN: return "TENSORS_INCONSISTENT_ALIGN";
+        case HANDCRAFTED_TENSORS_SUCCESS:            return "TENSORS_SUCCESS";
+        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:       return "TENSORS_CUSTOM_ALIGN";
 
-        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:      return "DATA_NOT_ENOUGH_DATA";
-        case HANDCRAFTED_DATA_BAD_ALIGNMENT:        return "DATA_BAD_ALIGNMENT";
-        case HANDCRAFTED_DATA_SUCCESS:              return "DATA_SUCCESS";
-        case HANDCRAFTED_DATA_CUSTOM_ALIGN:         return "DATA_CUSTOM_ALIGN";
+        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:       return "DATA_NOT_ENOUGH_DATA";
+        case HANDCRAFTED_DATA_BAD_ALIGN:             return "DATA_BAD_ALIGN";
+        case HANDCRAFTED_DATA_INCONSISTENT_ALIGN:    return "DATA_INCONSISTENT_ALIGN";
+        case HANDCRAFTED_DATA_SUCCESS:               return "DATA_SUCCESS";
+        case HANDCRAFTED_DATA_CUSTOM_ALIGN:          return "DATA_CUSTOM_ALIGN";
     }
     GGML_ABORT("fatal error");
 }
@@ -140,31 +145,41 @@ std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937
     return kv_types;
 }
 
-static void helper_write(const void * data, const size_t nbytes, FILE * file) {
+template <typename T>
+static void helper_write(FILE * file, const T & val) {
+    GGML_ASSERT(fwrite(&val, 1, sizeof(val), file) == sizeof(val));
+}
+
+static void helper_write(FILE * file, const void * data, const size_t nbytes) {
     GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
 }
 
 static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
     FILE * file = tmpfile();
 
+    if (!file) {
+        return file;
+    }
+
     std::mt19937 rng(seed);
+    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
 
     if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
         const char bad_magic[4] = {'F', 'U', 'G', 'G'};
-        helper_write(bad_magic, sizeof(bad_magic), file);
+        helper_write(file, bad_magic, sizeof(bad_magic));
     } else {
-        helper_write(GGUF_MAGIC, 4, file);
+        helper_write(file, GGUF_MAGIC, 4);
     }
 
     if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
         const uint32_t version = 1;
-        helper_write(&version, sizeof(version), file);
+        helper_write(file, version);
     } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
         const uint32_t version = GGUF_VERSION + 1;
-        helper_write(&version, sizeof(version), file);
+        helper_write(file, version);
     } else {
         const uint32_t version = GGUF_VERSION;
-        helper_write(&version, sizeof(version), file);
+        helper_write(file, version);
     }
 
     std::vector<tensor_config_t> tensor_configs;
@@ -174,10 +189,10 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
 
     if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
         const uint64_t n_tensors = -1;
-        helper_write(&n_tensors, sizeof(n_tensors), file);
+        helper_write(file, n_tensors);
     } else {
         const uint64_t n_tensors = tensor_configs.size();
-        helper_write(&n_tensors, sizeof(n_tensors), file);
+        helper_write(file, n_tensors);
     }
 
     std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
@@ -186,41 +201,49 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
     }
     {
         uint64_t n_kv = kv_types.size();
-        if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+        if (hft == HANDCRAFTED_KV_BAD_ALIGN      ||
+            hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
+            hft == HANDCRAFTED_DATA_BAD_ALIGN    || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+
             n_kv += 1;
         } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
             n_kv = -1;
         }
-        helper_write(&n_kv, sizeof(n_kv), file);
+        helper_write(file, n_kv);
     }
 
     if (hft < offset_has_kv) {
+        while (ftell(file) % alignment != 0) {
+            const char pad = 0;
+            helper_write(file, pad);
+        }
+
         for (int i = 0; i < extra_bytes; ++i) {
             const char tmp = 0;
-            helper_write(&tmp, sizeof(tmp), file);
+            helper_write(file, tmp);
         }
         rewind(file);
         return file;
     }
 
     for (int i = 0; i < int(kv_types.size()); ++i) {
-        const enum gguf_type type     = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].first);
-        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].second);
+        const enum gguf_type type     = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].first);
+        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].second);
 
         const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i));
 
         if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) {
             const uint64_t n = -1;
-            helper_write(&n, sizeof(n), file);
+            helper_write(file, n);
         } else {
             const uint64_t n = key.length();
-            helper_write(&n, sizeof(n), file);
+            helper_write(file, n);
         }
-        helper_write(key.data(), key.length(), file);
+        helper_write(file, key.data(), key.length());
 
         {
             const int32_t type32 = int32_t(type);
-            helper_write(&type32, sizeof(type32), file);
+            helper_write(file, type32);
         }
 
         uint32_t data[16];
@@ -233,69 +256,67 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
 
         if (type == GGUF_TYPE_STRING) {
             const uint64_t n = rng() % sizeof(data);
-            helper_write(&n,   sizeof(n), file);
-            helper_write(data,        n,  file);
+            helper_write(file, n);
+            helper_write(file, data, n);
             continue;
         }
 
         if (type == GGUF_TYPE_ARRAY) {
             {
                 const int32_t type32 = int32_t(type_arr);
-                helper_write(&type32, sizeof(type32), file);
+                helper_write(file, type32);
             }
             if (type_arr == GGUF_TYPE_STRING) {
                 const uint64_t nstr = rng() % (16 + 1);
-                helper_write(&nstr, sizeof(nstr), file);
+                helper_write(file, nstr);
                 for (uint64_t istr = 0; istr < nstr; ++istr) {
                     const uint64_t n = rng() % (sizeof(uint32_t) + 1);
-                    helper_write(&n,          sizeof(n), file);
-                    helper_write(&data[istr],        n,  file);
+                    helper_write(file, n);
+                    helper_write(file, &data[istr], n);
                 }
                 continue;
             }
             const size_t type_size = gguf_type_size(type_arr);
             const uint64_t n = (rng() % sizeof(data)) / type_size;
-            helper_write(&n,    sizeof(n),   file);
-            helper_write(&data, n*type_size, file);
+            helper_write(file, n);
+            helper_write(file, &data, n*type_size);
             continue;
         }
 
-        size_t type_size = hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type);
-        if (hft == HANDCRAFTED_KV_BAD_VALUE_SIZE) {
-            type_size += rng() % 3;
-        }
-        helper_write(data, type_size, file);
+        helper_write(file, data, hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type));
     }
 
-    if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
-        const std::string key = "general.alignment";
-        {
-            const uint64_t n = key.length();
-            helper_write(&n, sizeof(n), file);
-        }
-        helper_write(key.data(), key.length(), file);
+    if (hft == HANDCRAFTED_KV_BAD_ALIGN      ||
+        hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
+        hft == HANDCRAFTED_DATA_BAD_ALIGN    || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+
+        const uint64_t n = strlen(GGUF_KEY_GENERAL_ALIGNMENT);
+        helper_write(file, n);
+        helper_write(file, GGUF_KEY_GENERAL_ALIGNMENT, n);
 
         const int32_t type = gguf_type(GGUF_TYPE_UINT32);
-        helper_write(&type, sizeof(type), file);
+        helper_write(file, type);
 
-        const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT + 1;
-        helper_write(&alignment, sizeof(alignment), file);
+        alignment = expect_context_not_null(hft) ? 1 : 13;
+        helper_write(file, alignment);
     }
 
     if (hft < offset_has_tensors) {
+        while (ftell(file) % alignment != 0) {
+            const char pad = 0;
+            helper_write(file, pad);
+        }
+
         for (int i = 0; i < extra_bytes; ++i) {
             const char tmp = 0;
-            helper_write(&tmp, sizeof(tmp), file);
+            helper_write(file, tmp);
         }
         rewind(file);
         return file;
     }
 
-    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
-    if (hft == HANDCRAFTED_TENSORS_BAD_ALIGNMENT || hft == HANDCRAFTED_DATA_BAD_ALIGNMENT) {
-        alignment -= 1;
-    } else if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
-        alignment += 1;
+    if (hft == HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN || hft == HANDCRAFTED_DATA_INCONSISTENT_ALIGN) {
+        alignment = 1;
     }
 
     uint64_t offset = 0;
@@ -313,9 +334,9 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         }
         {
             const uint64_t n = name.length();
-            helper_write(&n, sizeof(n), file);
+            helper_write(file, n);
         }
-        helper_write(name.data(), name.length(), file);
+        helper_write(file, name.data(), name.length());
 
         uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1;
         for (int i = GGML_MAX_DIMS-1; i >= 1; --i) {
@@ -326,35 +347,35 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         }
         if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) {
             const uint32_t n_dims_bad = GGML_MAX_DIMS + 1;
-            helper_write(&n_dims_bad, sizeof(n_dims_bad), file);
+            helper_write(file, n_dims_bad);
         } else {
-            helper_write(&n_dims,     sizeof(n_dims),     file);
+            helper_write(file, n_dims);
         }
 
         if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) {
             for (uint32_t j = 0; j < n_dims; ++j) {
                 const int64_t bad_dim = -1;
-                helper_write(&bad_dim, sizeof(bad_dim), file);
+                helper_write(file, bad_dim);
             }
         } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG){
             for (uint32_t j = 0; j < n_dims; ++j) {
                 const int64_t big_dim = 4*int64_t(INT32_MAX);
-                helper_write(&big_dim, sizeof(big_dim), file);
+                helper_write(file, big_dim);
             }
         } else {
-            helper_write(shape.data(), n_dims*sizeof(int64_t), file);
+            helper_write(file, shape.data(), n_dims*sizeof(int64_t));
         }
 
         {
-            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? -1 : int32_t(type);
-            helper_write(&type32, sizeof(type32), file);
+            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? GGML_TYPE_COUNT : int32_t(type);
+            helper_write(file, type32);
         }
 
         if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) {
             const uint64_t bad_offset = -1;
-            helper_write(&bad_offset, sizeof(bad_offset), file);
+            helper_write(file, bad_offset);
         } else {
-            helper_write(&offset, sizeof(offset), file);
+            helper_write(file, offset);
         }
 
         int64_t ne = shape[0];
@@ -364,12 +385,9 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         offset += GGML_PAD(ggml_row_size(type, ne), alignment);
     }
 
-    const uint32_t alignment_overshoot = ftell(file) % alignment;
-    if (alignment_overshoot != 0) {
-        for (size_t i = alignment_overshoot; i < alignment; ++i) {
-            const char pad = 0;
-            helper_write(&pad, sizeof(pad), file);
-        }
+    while (ftell(file) % alignment != 0) {
+        const char pad = 0;
+        helper_write(file, pad);
     }
 
     if (hft >= offset_has_data) {
@@ -380,13 +398,13 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         }
         for (uint64_t i = 0; i < nbytes; ++i) {
             const uint8_t random_byte = i % 256;
-            helper_write(&random_byte, sizeof(random_byte), file);
+            helper_write(file, random_byte);
         }
     }
 
     for (int i = 0; i < extra_bytes; ++i) {
         const char tmp = 0;
-        helper_write(&tmp, sizeof(tmp), file);
+        helper_write(file, tmp);
     }
     rewind(file);
     return file;
@@ -505,6 +523,16 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
             }
 
             const char * data_gguf = reinterpret_cast<const char *>(gguf_get_arr_data(gguf_ctx, id));
+
+            if (type_arr == GGUF_TYPE_BOOL) {
+                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
+                    if (bool(data8[arr_i]) != bool(data_gguf[arr_i])) {
+                        ok = false;
+                    }
+                }
+                continue;
+            }
+
             if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
                 ok = false;
             }
@@ -512,12 +540,20 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
         }
 
         const char * data_gguf = reinterpret_cast<const char *>(gguf_get_val_data(gguf_ctx, id));
+
+        if (type == GGUF_TYPE_BOOL) {
+            if (bool(*data8) != bool(*data_gguf)) {
+                ok = false;
+            }
+            continue;
+        }
+
         if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
             ok = false;
         }
     }
 
-    const uint32_t expected_alignment = alignment_defined ? GGUF_DEFAULT_ALIGNMENT + 1 : GGUF_DEFAULT_ALIGNMENT;
+    const uint32_t expected_alignment = alignment_defined ? 1 : GGUF_DEFAULT_ALIGNMENT;
     if (gguf_get_alignment(gguf_ctx) != expected_alignment) {
         ok = false;
     }
@@ -539,7 +575,7 @@ static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsig
 
     bool ok = true;
 
-    const int id_alignment = gguf_find_key(gguf_ctx, "general.alignment");
+    const int id_alignment = gguf_find_key(gguf_ctx, GGUF_KEY_GENERAL_ALIGNMENT);
     const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT;
 
     uint64_t expected_offset = 0;
@@ -607,7 +643,7 @@ static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const u
 
         std::vector<uint8_t> data(size);
         GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0);
-        GGML_ASSERT(fread(data.data(), 1, size, file) == size);
+        GGML_ASSERT(fread(data.data(), 1, data.size(), file) == data.size());
 
         for (size_t j = 0; j < size; ++j) {
             const uint8_t expected_byte = (j + offset) % 256;
@@ -627,15 +663,15 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
     const std::vector<handcrafted_file_type> hfts = {
         HANDCRAFTED_HEADER_BAD_MAGIC,
         HANDCRAFTED_HEADER_BAD_VERSION_1,
-        // HANDCRAFTED_FILE_TYPE_BAD_VERSION_FUTURE, // FIXME
+        HANDCRAFTED_HEADER_BAD_VERSION_FUTURE,
         HANDCRAFTED_HEADER_BAD_N_KV,
         HANDCRAFTED_HEADER_BAD_N_TENSORS,
         HANDCRAFTED_HEADER_EMPTY,
 
         HANDCRAFTED_KV_BAD_KEY_SIZE,
         HANDCRAFTED_KV_BAD_TYPE,
-        // HANDCRAFTED_KV_BAD_VALUE_SIZE, // FIXME sanitizer limit
-        // HANDCRAFTED_FILE_TYPE_DUPLICATE_KEY, // FIXME
+        HANDCRAFTED_KV_DUPLICATE_KEY,
+        HANDCRAFTED_KV_BAD_ALIGN,
         HANDCRAFTED_KV_SUCCESS,
 
         HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
@@ -643,14 +679,16 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
         HANDCRAFTED_TENSORS_BAD_SHAPE,
         HANDCRAFTED_TENSORS_NE_TOO_BIG,
         HANDCRAFTED_TENSORS_BAD_TYPE,
-        // HANDCRAFTED_TENSORS_BAD_OFFSET, // FIXME
+        HANDCRAFTED_TENSORS_BAD_OFFSET,
         HANDCRAFTED_TENSORS_DUPLICATE_NAME,
-        // HANDCRAFTED_TENSORS_BAD_ALIGNMENT, // FIXME
+        HANDCRAFTED_TENSORS_BAD_ALIGN,
+        HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN,
         HANDCRAFTED_TENSORS_SUCCESS,
         HANDCRAFTED_TENSORS_CUSTOM_ALIGN,
 
         HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
-        // HANDCRAFTED_DATA_BAD_ALIGNMENT, // FIXME
+        HANDCRAFTED_DATA_BAD_ALIGN,
+        HANDCRAFTED_DATA_INCONSISTENT_ALIGN,
         HANDCRAFTED_DATA_SUCCESS,
         HANDCRAFTED_DATA_CUSTOM_ALIGN,
     };
@@ -674,6 +712,7 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
             /*no_alloc =*/ false,
             /*ctx      =*/ hft >= offset_has_data ? &ctx : nullptr,
         };
+
         struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);
 
         if (expect_context_not_null(hft)) {
@@ -689,7 +728,7 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
         }
         ntest++;
 
-        if (false && hft >= offset_has_data && !expect_context_not_null(hft)) { // FIXME
+        if (hft >= offset_has_data && !expect_context_not_null(hft)) {
             printf("%s:   - no_dangling_ggml_context_pointer: ", __func__);
             if (ctx) {
                 printf("\033[1;31mFAIL\033[0m\n");
@@ -700,23 +739,6 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
             ntest++;
         }
 
-        if (false && expect_context_not_null(hft)) { // FIXME
-            FILE * file_eb = get_handcrafted_file(seed, hft, /*extra_bytes =*/ 1);
-            struct gguf_context * gguf_ctx_eb = gguf_init_from_file_impl(file_eb, gguf_params);
-
-            printf("%s:   - context_null_with_extra_bytes: ", __func__);
-            if (gguf_ctx_eb) {
-                printf("\033[1;31mFAIL\033[0m\n");
-            } else {
-                printf("\033[1;32mOK\033[0m\n");
-                npass++;
-            }
-            ntest++;
-
-            gguf_free(gguf_ctx_eb);
-            fclose(file_eb);
-        }
-
         const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;
 
         if (expect_context_not_null(hft)) {
@@ -763,14 +785,15 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
             ntest++;
         }
 
+        fclose(file);
         if (gguf_ctx) {
             ggml_free(ctx);
             gguf_free(gguf_ctx);
         }
-        fclose(file);
         printf("\n");
     }
 
+
     return std::make_pair(npass, ntest);
 }
 
@@ -789,10 +812,6 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
         const std::string key = "my_key_" + std::to_string(rng() % 1024);
         const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);
 
-        if (type == GGUF_TYPE_STRING || type == GGUF_TYPE_ARRAY) {
-            continue; // FIXME memory leak
-        }
-
         switch (type) {
             case GGUF_TYPE_UINT8:   gguf_set_val_u8  (gguf_ctx, key.c_str(), rng() % (1 <<  7));             break;
             case GGUF_TYPE_INT8:    gguf_set_val_i8  (gguf_ctx, key.c_str(), rng() % (1 <<  7) - (1 <<  6)); break;
@@ -826,6 +845,9 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
                         std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
                         for (size_t j = 0; j < random_data.size(); ++j) {
                             random_data[j] = rng();
+                            if (type_arr == GGUF_TYPE_BOOL) {
+                                random_data[j] &= 0x01010101; // the sanitizer complains if booleans are not 0 or 1
+                            }
                         }
                         gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne);
                     } break;
@@ -928,6 +950,17 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
                 continue;
             }
 
+            if (type_arr == GGUF_TYPE_BOOL) {
+                const int8_t * data       = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx,   id));
+                const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
+                for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
+                    if (bool(data[arr_i]) != bool(data_other[arr_i])) {
+                        ok = false;
+                    }
+                }
+                continue;
+            }
+
             if (type_arr == GGUF_TYPE_STRING) {
                 for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
                     const std::string str       = gguf_get_arr_str(ctx,   id,       arr_i);
@@ -939,8 +972,8 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
                 continue;
             }
 
-            const char * data       = reinterpret_cast(gguf_get_arr_data(ctx,   id));
-            const char * data_other = reinterpret_cast(gguf_get_arr_data(other, idx_other));
+            const int8_t * data       = reinterpret_cast(gguf_get_arr_data(ctx,   id));
+            const int8_t * data_other = reinterpret_cast(gguf_get_arr_data(other, idx_other));
             if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) {
                 ok = false;
             }
@@ -1028,21 +1061,6 @@ static bool same_tensor_data(const struct ggml_context * orig, const struct ggml
 }
 
 static std::pair test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
-    FILE * file = tmpfile();
-#ifdef _WIN32
-    if (!file) {
-        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows");
-        printf("%s: skipping tests");
-        return std::make_pair(0, 0);
-    }
-#else
-    GGML_ASSERT(file);
-#endif // _WIN32
-
-    if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
-        return std::make_pair(0, 0); // FIXME
-    }
-
     ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
     printf("%s: device=%s, backend=%s, only_meta=%s\n",
         __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");
@@ -1060,10 +1078,24 @@ static std::pair test_roundtrip(ggml_backend_dev_t dev, const unsigned
         bbuf       = result.buffer;
     }
 
-    struct gguf_buf gbuf = gguf_buf_init(16 * 1024);
-    gguf_write_to_buf(gguf_ctx_0, &gbuf, only_meta);
-    helper_write(gbuf.data, gbuf.offset, file);
-    rewind(file);
+    FILE * file = tmpfile();
+
+#ifdef _WIN32
+    if (!file) {
+        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows");
+        printf("%s: skipping tests");
+        return std::make_pair(0, 0);
+    }
+#else
+    GGML_ASSERT(file);
+#endif // _WIN32
+
+    {
+        std::vector buf;
+        gguf_write_to_buf(gguf_ctx_0, buf, only_meta);
+        GGML_ASSERT(fwrite(buf.data(), 1, buf.size(), file) == buf.size());
+        rewind(file);
+    }
 
     struct ggml_context * ctx_1 = nullptr;
     struct gguf_init_params gguf_params = {
@@ -1151,9 +1183,8 @@ static std::pair test_roundtrip(ggml_backend_dev_t dev, const unsigned
     ggml_free(ctx_1);
     gguf_free(gguf_ctx_0);
     gguf_free(gguf_ctx_1);
-    gguf_buf_free(gbuf);
     ggml_backend_free(backend);
-    GGML_ASSERT(fclose(file) == 0);
+    fclose(file);
 
     printf("\n");
     return std::make_pair(npass, ntest);
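
For context on the hunks above: the round-trip test now serializes through a
plain byte buffer instead of the removed gguf_buf helper. The sketch below
condenses that pattern. It is illustrative only; the int8_t element type of
the buffer is an assumption (the template argument is elided in this
rendering), and the gguf_init_params field names come from the gguf API
rather than from these hunks.

    // Sketch: serialize a gguf context to a byte buffer, dump it to a temp
    // file, then parse it back with the FILE*-based loader used by the test.
    #include <cstdio>
    #include <cstdint>
    #include <vector>

    static struct gguf_context * write_and_reload(struct gguf_context * gguf_ctx_0, bool only_meta) {
        std::vector<int8_t> buf;                       // assumed element type
        gguf_write_to_buf(gguf_ctx_0, buf, only_meta); // buffer-based writer from this patch

        FILE * file = tmpfile();
        if (!file) {
            return nullptr; // tmpfile() may need elevated privileges on Windows
        }
        if (fwrite(buf.data(), 1, buf.size(), file) != buf.size()) {
            fclose(file);
            return nullptr;
        }
        rewind(file);

        struct ggml_context * ctx = nullptr; // owned (and freed) by the caller in the real test
        struct gguf_init_params params = {
            /*no_alloc =*/ false,
            /*ctx      =*/ &ctx,
        };
        struct gguf_context * gguf_ctx_1 = gguf_init_from_file_impl(file, params);
        fclose(file);
        return gguf_ctx_1;
    }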

From bec2183f2c8d37cf1278c11d1adb9311e9eaa242 Mon Sep 17 00:00:00 2001
From: ag2s20150909 <19373730+ag2s20150909@users.noreply.github.com>
Date: Wed, 8 Jan 2025 16:17:29 +0800
Subject: [PATCH 63/81] fix: Vulkan shader gen binary path when Cross-compiling
 (#11096)

* fix: Vulkan shader gen binary path when cross-compiling
---
 ggml/src/ggml-vulkan/CMakeLists.txt | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt
index 9501de736..61de21d6a 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -69,11 +69,15 @@ if (Vulkan_FOUND)
 
     file(GLOB _ggml_vk_shader_deps "${_ggml_vk_input_dir}/*.comp")
 
+    if (NOT CMAKE_CROSSCOMPILING)
+        set(_ggml_vk_genshaders_cmd "$/${_ggml_vk_genshaders_cmd}")
+    endif ()
+
     add_custom_command(
         OUTPUT ${_ggml_vk_header}
                 ${_ggml_vk_source}
 
-        COMMAND "$/${_ggml_vk_genshaders_cmd}"
+        COMMAND ${_ggml_vk_genshaders_cmd}
             --glslc      ${Vulkan_GLSLC_EXECUTABLE}
             --input-dir  ${_ggml_vk_input_dir}
             --output-dir ${_ggml_vk_output_dir}

From 02f04301417e7fb44fa1025bc1b0aef866e2ca89 Mon Sep 17 00:00:00 2001
From: Mathieu Baudier 
Date: Wed, 8 Jan 2025 09:18:13 +0100
Subject: [PATCH 64/81] Disable GL_KHR_cooperative_matrix Vulkan extension if
 not available. (#11117)

* Disable GL_KHR_cooperative_matrix Vulkan extension if not available.

* Perform Vulkan extension checks in a more sensible order

* Remove unnecessary #ifdef directive
---
 ggml/src/ggml-vulkan/CMakeLists.txt            | 14 ++++++++++++++
 ggml/src/ggml-vulkan/ggml-vulkan.cpp           | 18 +++++++++++++++---
 .../vulkan-shaders/test_coopmat_support.comp   |  7 +++++++
 .../vulkan-shaders/vulkan-shaders-gen.cpp      |  2 ++
 4 files changed, 38 insertions(+), 3 deletions(-)
 create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp

diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt
index 61de21d6a..c0ddaac82 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -8,6 +8,20 @@ if (Vulkan_FOUND)
                              ../../include/ggml-vulkan.h
                             )
 
+    # Compile a test shader to determine whether GL_KHR_cooperative_matrix is supported.
+    # If it's not, there will be an error to stderr.
+    # If it's supported, set a define to indicate that we should compile those shaders
+    execute_process(COMMAND ${Vulkan_GLSLC_EXECUTABLE} -o - -fshader-stage=compute --target-env=vulkan1.3 "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_coopmat_support.comp"
+                    OUTPUT_VARIABLE glslc_output
+                    ERROR_VARIABLE glslc_error)
+
+    if (${glslc_error} MATCHES ".*extension not supported: GL_KHR_cooperative_matrix.*")
+        message(STATUS "GL_KHR_cooperative_matrix not supported by glslc")
+    else()
+        message(STATUS "GL_KHR_cooperative_matrix supported by glslc")
+        add_compile_definitions(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
+    endif()
+
     # Compile a test shader to determine whether GL_NV_cooperative_matrix2 is supported.
     # If it's not, there will be an error to stderr.
     # If it's supported, set a define to indicate that we should compile those shaders
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index d75cd6d61..077452424 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -1645,6 +1645,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
 #undef CREATE_MM2
     } else
 #endif  // defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+#if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
     if (device->coopmat_support) {
         // Create 6 variants, {s,m,l}x{unaligned,aligned}
 #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
@@ -1739,7 +1740,9 @@ static void ggml_vk_load_shaders(vk_device& device) {
         }
 #undef CREATE_MM2
 #undef CREATE_MM
-    } else if (device->fp16) {
+    } else
+#endif  // defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
+    if (device->fp16) {
         // Create 6 variants, {s,m,l}x{unaligned,aligned}
 #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
         if (device->mul_mat ## ID ## _l) \
@@ -2242,6 +2245,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
             last_struct = (VkBaseOutStructure *)&subgroup_size_control_features;
         }
 
+#if defined(VK_KHR_cooperative_matrix)
         VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
         coopmat_features.pNext = nullptr;
         coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
@@ -2251,6 +2255,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
             last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
             last_struct = (VkBaseOutStructure *)&coopmat_features;
         }
+#endif
 
 #if defined(VK_NV_cooperative_matrix2)
         VkPhysicalDeviceCooperativeMatrix2FeaturesNV coopmat2_features {};
@@ -2283,7 +2288,9 @@ static vk_device ggml_vk_get_device(size_t idx) {
             device_extensions.push_back("VK_EXT_subgroup_size_control");
         }
 
+#if defined(VK_KHR_cooperative_matrix)
         device->coopmat_support = device->coopmat_support && coopmat_features.cooperativeMatrix;
+#endif
 
         if (coopmat2_support) {
 #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
@@ -2376,6 +2383,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
             device_extensions.push_back("VK_KHR_shader_float16_int8");
         }
 
+#if defined(VK_KHR_cooperative_matrix)
         if (device->coopmat_support) {
             // Query supported shapes
             std::vector cm_props;
@@ -2442,7 +2450,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
         if (device->coopmat_support) {
             device_extensions.push_back("VK_KHR_cooperative_matrix");
         }
-
+#endif
         device->name = GGML_VK_NAME + std::to_string(idx);
 
         device_create_info = {
@@ -2553,9 +2561,11 @@ static void ggml_vk_print_gpu_info(size_t idx) {
             fp16_storage = true;
         } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
             fp16_compute = true;
-        } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
+#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
+       } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
                    !getenv("GGML_VK_DISABLE_COOPMAT")) {
             coopmat_support = true;
+#endif
 #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
         } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
                    !getenv("GGML_VK_DISABLE_COOPMAT2")) {
@@ -2593,6 +2603,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
     // Pointer to the last chain element
     VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_features;
 
+#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
     VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
     coopmat_features.pNext = nullptr;
     coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
@@ -2608,6 +2619,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
     fp16 = fp16 && vk12_features.shaderFloat16;
 
     coopmat_support = coopmat_support && coopmat_features.cooperativeMatrix;
+#endif
 
     std::string matrix_cores = coopmat2_support ? "NV_coopmat2" : coopmat_support ? "KHR_coopmat" : "none";
 
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp b/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp
new file mode 100644
index 000000000..8c5dd1bd1
--- /dev/null
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp
@@ -0,0 +1,7 @@
+#version 460
+
+#extension GL_KHR_cooperative_matrix : require
+
+void main()
+{
+}
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
index 8111c0638..7b5044798 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
@@ -342,9 +342,11 @@ void process_shaders() {
         matmul_shaders(true, matmul_id, false, false, false);
         matmul_shaders(true, matmul_id, false, false, true);
 
+#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
         // Coopmat, fp32acc and fp16acc
         matmul_shaders(true, matmul_id, true, false, false);
         matmul_shaders(true, matmul_id, true, false, true);
+#endif
 
 #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
         // Coopmat2, fp32acc and fp16acc
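
As a compact illustration of the gating pattern this patch applies throughout
ggml-vulkan.cpp: the KHR cooperative-matrix feature struct is only declared,
chained into the pNext list and read back when both the Vulkan headers
(VK_KHR_cooperative_matrix) and the glslc probe define
(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT) are present. A minimal sketch with a
made-up helper name follows; in ggml the query is additionally gated on the
extension appearing in the device's extension list.

    #include <vulkan/vulkan.h>

    // Sketch only: chain the optional feature struct and read the feature bit.
    static bool device_reports_coopmat(VkPhysicalDevice dev) {
    #if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
        VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features {};
        coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;

        VkPhysicalDeviceFeatures2 features2 {};
        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        features2.pNext = &coopmat_features;

        vkGetPhysicalDeviceFeatures2(dev, &features2);
        return coopmat_features.cooperativeMatrix == VK_TRUE;
    #else
        (void) dev;
        return false; // extension support not compiled in
    #endif
    }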

From 0d52a69e4bf0d6181beec7853307bdcdeec9905b Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Wed, 8 Jan 2025 11:29:34 +0200
Subject: [PATCH 65/81] ci : fix cmake option (#11125)

---
 .github/workflows/build.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 602cf5220..02a193b86 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -665,7 +665,7 @@ jobs:
           - build: 'llvm-arm64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'msvc-arm64'
-            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=O'
+            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'llvm-arm64-opencl-adreno'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
 

From 8cef75c743ba13ebbd6d380c531200c768a8b8aa Mon Sep 17 00:00:00 2001
From: amritahs-ibm 
Date: Wed, 8 Jan 2025 16:24:19 +0530
Subject: [PATCH 66/81] llamafile : ppc64le MMA INT8 implementation (#10912)

This change upstreams llamafile's CPU matrix
multiplication kernels for ppc64le, using MMA
builtins for the quantised int8 data type.

This change results in a 10% - 70% improvement
in total speed (i.e. all tokens / total time) across
various batch sizes.

The patch was tested with the Meta-Llama-3-8B,
Mistral-7B and Llama-2-7B-chat-hf models on an
IBM POWER10 machine.

Signed-off-by: Amrita H S 
---
 ggml/src/ggml-cpu/llamafile/sgemm.cpp | 836 ++++++++++++++++++++++++--
 1 file changed, 770 insertions(+), 66 deletions(-)

diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
index 8fce576c3..c22a66287 100644
--- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp
+++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
@@ -54,6 +54,7 @@
 #include "ggml-quants.h"
 
 #include 
+#include 
 
 #ifdef _MSC_VER
 #define NOINLINE __declspec(noinline)
@@ -1051,6 +1052,704 @@ class tinyBLAS_Q0_AVX {
       } \
    } \
 
+template 
+class tinyBLAS_Q0_PPC {
+  public:
+    tinyBLAS_Q0_PPC(int64_t k,
+                const TA *A, int64_t lda,
+                const TB *B, int64_t ldb,
+                TC *C, int64_t ldc,
+                int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+
+    template
+    inline void save_res(int ii, int jj, int idx, vector float* fin_res) {
+       for (int I = 0; I < RM; I++) {
+          for (int J = 0; J < RN; J++) {
+             *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&fin_res[idx+I]+J);
+          }
+       }
+    }
+
+    template
+    inline void compute(acc_t* ACC, int c_idx, int s_idx, std::array& comparray, vector float* vs, vector float* fin_res) {
+       vector signed int vec_C[4];
+       vector float CA[4] = {0};
+       vector float res[4] = {0};
+       __builtin_mma_disassemble_acc(vec_C, ACC);
+       for (int i = 0; i < 4; i++) {
+          CA[i] = vec_splats((float)(((double)comparray[c_idx+i]) * -128.0));
+          res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+          fin_res[s_idx+i] = vec_madd(res[i], vs[s_idx+i], fin_res[s_idx+i]);
+       }
+    }
+
+    template
+    void packNormal(const TA* a, int64_t lda, int rows, int cols, VA* vec, bool flip) {
+        int64_t i, j;
+        TA *aoffset = NULL;
+        VA *vecOffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2]={0};
+        VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2]={0};
+        VB t1, t2, t3, t4, t5, t6, t7, t8;
+        vector unsigned char xor_vector;
+        uint8_t flip_vec = 0x80;
+        xor_vector = vec_splats(flip_vec);
+        vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
+        vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
+        vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27};
+        vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
+
+        aoffset = const_cast(a);
+        vecOffset = vec;
+        j = (rows >> 3);
+        if (j > 0) {
+            do {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset5 = aoffset4 + lda;
+            aoffset6 = aoffset5 + lda;
+            aoffset7 = aoffset6 + lda;
+            aoffset8 = aoffset7 + lda;
+            aoffset += 8 * lda;
+
+            i = (cols >> 3);
+            if (i > 0) {
+               do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+                    C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5->qs);
+                    C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6->qs);
+                    C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7->qs);
+                    C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8->qs);
+
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+                    __builtin_vsx_disassemble_pair(c5, &C5);
+                    __builtin_vsx_disassemble_pair(c6, &C6);
+                    __builtin_vsx_disassemble_pair(c7, &C7);
+                    __builtin_vsx_disassemble_pair(c8, &C8);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    t1 = vec_perm(c5[0], c6[0], swiz1);
+                    t2 = vec_perm(c5[0], c6[0], swiz2);
+                    t3 = vec_perm(c7[0], c8[0], swiz1);
+                    t4 = vec_perm(c7[0], c8[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+128);
+                    vec_xst(t6, 0, vecOffset+144);
+                    vec_xst(t7, 0, vecOffset+160);
+                    vec_xst(t8, 0, vecOffset+176);
+
+                    t1 = vec_perm(c5[1], c6[1], swiz1);
+                    t2 = vec_perm(c5[1], c6[1], swiz2);
+                    t3 = vec_perm(c7[1], c8[1], swiz1);
+                    t4 = vec_perm(c7[1], c8[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+192);
+                    vec_xst(t6, 0, vecOffset+208);
+                    vec_xst(t7, 0, vecOffset+224);
+                    vec_xst(t8, 0, vecOffset+240);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    aoffset5 += lda;
+                    aoffset6 += lda;
+                    aoffset7 += lda;
+                    aoffset8 += lda;
+                    vecOffset += 256;
+                    i--;
+               } while(i > 0);
+            }
+            j--;
+        } while(j > 0);
+    }
+
+    if (rows & 4) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset += 4 * lda;
+
+        i = (cols >> 3);
+            if (i > 0) {
+               do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    vecOffset += 128;
+                    i--;
+               } while(i > 0);
+            }
+        }
+        if (rows & 3) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            i = (cols >> 3);
+        if (i > 0) {
+                do {
+                    switch(rows) {
+                        case 3: C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                                __builtin_vsx_disassemble_pair(c3, &C3);
+                        case 2: C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                                __builtin_vsx_disassemble_pair(c2, &C2);
+                        case 1: C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                                __builtin_vsx_disassemble_pair(c1, &C1);
+                                break;
+                    }
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    vecOffset += 128;
+                    i--;
+               } while(i > 0);
+            }
+        }
+    }
+
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        int m_rem = MIN(m - m0, 8);
+        int n_rem = MIN(n - n0, 8);
+        // TO-DO: KERNEL_16x8 and KERNEL_8x16 are having some performance
+        // issues. After resolving them, below code will be enabled.
+        /*if (m_rem >= 16 && n_rem >= 8) {
+            mc = 16;
+            nc = 8;
+            gemm<16,8>(m0, m, n0, n);
+        } else if(m_rem >= 8 && n_rem >= 16) {
+            mc = 8;
+            nc = 16;
+            gemm<8,16>(m0, m, n0, n);
+        }*/
+        if (m_rem >= 8 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 8) {
+            mc = 4;
+            nc = 8;
+            gemm<4,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 4) {
+            mc = 8;
+            nc = 4;
+            gemm<8,4>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 4) {
+            mc = 4;
+            nc = 4;
+            gemm_small<4, 4>(m0, m, n0, n);
+        } else if ((m_rem < 4) && (n_rem > 4)) {
+            nc = 4;
+            switch(m_rem) {
+                case 1:
+                    mc = 1;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 2:
+                    mc = 2;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 3:
+                    mc = 3;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else if ((m_rem > 4) && (n_rem < 4)) {
+            mc = 4;
+            switch(n_rem) {
+                case 1:
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 2:
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 3:
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else {
+            switch((m_rem << 4) | n_rem) {
+                case 0x43:
+                    mc = 4;
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                case 0x42:
+                    mc = 4;
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 0x41:
+                    mc = 4;
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 0x34:
+                    mc = 3;
+                    nc = 4;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                case 0x33:
+                    mc = 3;
+                    nc = 3;
+                    gemm_small<3, 3>(m0, m, n0, n);
+                    break;
+                case 0x32:
+                    mc = 3;
+                    nc = 2;
+                    gemm_small<3, 2>(m0, m, n0, n);
+                    break;
+                case 0x31:
+                    mc = 3;
+                    nc = 1;
+                    gemm_small<3, 1>(m0, m, n0, n);
+                    break;
+                case 0x24:
+                    mc = 2;
+                    nc = 4;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 0x23:
+                    mc = 2;
+                    nc = 3;
+                    gemm_small<2, 3>(m0, m, n0, n);
+                    break;
+                case 0x22:
+                    mc = 2;
+                    nc = 2;
+                    gemm_small<2, 2>(m0, m, n0, n);
+                    break;
+                case 0x21:
+                    mc = 2;
+                    nc = 1;
+                    gemm_small<2, 1>(m0, m, n0, n);
+                    break;
+                case 0x14:
+                    mc = 1;
+                    nc = 4;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 0x13:
+                    mc = 1;
+                    nc = 3;
+                    gemm_small<1, 3>(m0, m, n0, n);
+                    break;
+                case 0x12:
+                    mc = 1;
+                    nc = 2;
+                    gemm_small<1, 2>(m0, m, n0, n);
+                    break;
+                case 0x11:
+                    mc = 1;
+                    nc = 1;
+                    gemm_small<1, 1>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
+    void KERNEL_4x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[8], vec_B[16] = {0};
+        acc_t acc_0, acc_1;
+        std::array comparray;
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            packNormal((A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false);
+            packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x], vec_B[x+8]);
+            }
+            for (int I = 0; I<4; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+4]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 4; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<4>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<4>(&acc_1, 0, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii, jj+4, 4, fin_res);
+    }
+
+    void KERNEL_8x4(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[8] = {0};
+        acc_t acc_0, acc_1;
+        std::array comparray;
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            packNormal((A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            packNormal((B+(jj*ldb)+l), ldb, 4, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 8; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+    }
+
+    void KERNEL_8x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[16] = {0};
+        acc_t acc_0, acc_1, acc_2, acc_3;
+        std::array comparray;
+        vector float fin_res[16] = {0};
+        vector float vs[16] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            __builtin_mma_xxsetaccz(&acc_2);
+            __builtin_mma_xxsetaccz(&acc_3);
+            packNormal((A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_2, vec_A[x], vec_B[x+8]);
+                __builtin_mma_xvi8ger4pp(&acc_3, vec_A[x+8], vec_B[x+8]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+8]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 8; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+            compute<8>(&acc_2, 0, 8, comparray, vs, fin_res);
+            compute<8>(&acc_3, 4, 12, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+        save_res<4, 4>(ii, jj+4, 8, fin_res);
+        save_res<4, 4>(ii+4, jj+4, 12, fin_res);
+    }
+
+    template
+    void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        vec_t vec_A[8], vec_B[8] = {0};
+        vector signed int vec_C[4];
+        acc_t acc_0;
+
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            std::array comparray;
+            vector float res[4] = {0};
+            vector float fin_res[4] = {0};
+            vector float vs[4] = {0};
+            vector float CA[4] = {0};
+            __builtin_prefetch((A+(ii*lda)+0)->qs, 0, 1); // prefetch first value
+            __builtin_prefetch((B+(jj*ldb)+0)->qs, 0, 1); // prefetch first value
+            for (int l = 0; l < k; l++) {
+                __builtin_prefetch((A+(ii*lda)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_prefetch((B+(jj*ldb)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_mma_xxsetaccz(&acc_0);
+                packNormal((A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false);
+                packNormal((B+(jj*ldb)+l), ldb, RN, 8, (uint8_t*)vec_B, true);
+                for(int x = 0; x < 8; x+=4) {
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+1], vec_B[x+1]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+2], vec_B[x+2]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+3], vec_B[x+3]);
+                }
+                for (int I = 0; I<RM; I++) {
+                    for (int J = 0; J<RN; J++) {
+                        *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    }
+                }
+                __builtin_mma_disassemble_acc(vec_C, &acc_0);
+                auto aoffset = A+(ii*lda)+l;
+                for (int i = 0; i < RM; i++) {
+                    comparray[i] = 0;
+                    int ca = 0;
+                    const int8_t *at = aoffset->qs;
+                    for (int j = 0; j < 32; j++)
+                        ca += (int)*at++;
+                    comparray[i] = ca;
+                    aoffset += lda;
+                }
+
+                for (int i = 0; i < RM; i++) {
+                    CA[i] = vec_splats((float)(((double)comparray[i]) * -128.0));
+                    res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+                    fin_res[i] = vec_madd(res[i], vs[i], fin_res[i]);
+                }
+            }
+            save_res(ii, jj, 0, fin_res);
+        }
+    }
+
+    template
+    inline void kernel(int64_t ii, int64_t jj) {
+       if constexpr(RM == 4 && RN == 8) {
+          KERNEL_4x8(ii,jj);
+       } else if constexpr(RM == 8 && RN == 4) {
+          KERNEL_8x4(ii,jj);
+       } else if constexpr(RM == 8 && RN == 8) {
+          KERNEL_8x8(ii,jj);
+       } else {
+          static_assert(false, "RN/RM values not supported");
+       }
+    }
+
+    template 
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            kernel(ii, jj);
+        }
+    }
+
+    const TA *const A;
+    const TB *const B;
+    TC *C;
+    TA *At;
+    TB *Bt;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+
 template 
 class tinyBLAS_PPC {
   public:
@@ -1070,13 +1769,17 @@ class tinyBLAS_PPC {
 
     void (tinyBLAS_PPC::*kernel)(int64_t, int64_t);
 
-    void READ_BLOCK(const float* a, int64_t lda, int rows, int cols, float* vec) {
+    template
+    void packTranspose(const TA* a, int64_t lda, int rows, int cols, TA* vec) {
         int64_t i, j;
-        float *aoffset = NULL, *boffset = NULL;
-        float *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
-        float *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
-
-        aoffset = const_cast(a);
+        TA *aoffset = NULL, *boffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VA c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VA c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VA t1, t2, t3, t4, t5, t6, t7, t8;
+        aoffset = const_cast(a);
         boffset = vec;
         j = (rows >> 3);
         if (j > 0) {
@@ -1092,9 +1795,6 @@ class tinyBLAS_PPC {
                 aoffset += 8 * lda;
                 i = (cols >> 3);
                 if (i > 0) {
-                    __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
-                    vector float c1[2], c2[2], c3[2], c4[2], c5[2], c6[2], c7[2], c8[2];
-                    vector float t1, t2, t3, t4, t5, t6, t7, t8;
                     do {
                         C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
                         C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
@@ -1174,21 +1874,19 @@ class tinyBLAS_PPC {
                     } while(i > 0);
                 }
                 if (cols & 4) {
-                    vector float c1, c2, c3, c4, c5, c6, c7, c8;
-                    vector float t1, t2, t3, t4, t5, t6, t7, t8;
-                    c1 = vec_xl(0, aoffset1);
-                    c2 = vec_xl(0, aoffset2);
-                    c3 = vec_xl(0, aoffset3);
-                    c4 = vec_xl(0, aoffset4);
-                    c5 = vec_xl(0, aoffset5);
-                    c6 = vec_xl(0, aoffset6);
-                    c7 = vec_xl(0, aoffset7);
-                    c8 = vec_xl(0, aoffset8);
+                    c1[0] = vec_xl(0, aoffset1);
+                    c2[0] = vec_xl(0, aoffset2);
+                    c3[0] = vec_xl(0, aoffset3);
+                    c4[0] = vec_xl(0, aoffset4);
+                    c5[0] = vec_xl(0, aoffset5);
+                    c6[0] = vec_xl(0, aoffset6);
+                    c7[0] = vec_xl(0, aoffset7);
+                    c8[0] = vec_xl(0, aoffset8);
 
-                    t1 = vec_mergeh(c1, c2);
-                    t2 = vec_mergeh(c3, c4);
-                    t3 = vec_mergeh(c5, c6);
-                    t4 = vec_mergeh(c7, c8);
+                    t1 = vec_mergeh(c1[0], c2[0]);
+                    t2 = vec_mergeh(c3[0], c4[0]);
+                    t3 = vec_mergeh(c5[0], c6[0]);
+                    t4 = vec_mergeh(c7[0], c8[0]);
                     t5 = vec_xxpermdi(t1, t2, 0);
                     t6 = vec_xxpermdi(t3, t4, 0);
                     t7 = vec_xxpermdi(t1, t2, 3);
@@ -1198,10 +1896,10 @@ class tinyBLAS_PPC {
                     vec_xst(t7, 0, boffset+8);
                     vec_xst(t8, 0, boffset+12);
 
-                    t1 = vec_mergel(c1, c2);
-                    t2 = vec_mergel(c3, c4);
-                    t3 = vec_mergel(c5, c6);
-                    t4 = vec_mergel(c7, c8);
+                    t1 = vec_mergel(c1[0], c2[0]);
+                    t2 = vec_mergel(c3[0], c4[0]);
+                    t3 = vec_mergel(c5[0], c6[0]);
+                    t4 = vec_mergel(c7[0], c8[0]);
                     t5 = vec_xxpermdi(t1, t2, 0);
                     t6 = vec_xxpermdi(t3, t4, 0);
                     t7 = vec_xxpermdi(t1, t2, 3);
@@ -1223,9 +1921,6 @@ class tinyBLAS_PPC {
             aoffset += 4 * lda;
             i = (cols >> 3);
             if (i > 0) {
-                __vector_pair C1, C2, C3, C4;
-                vector float c1[2], c2[2], c3[2], c4[2];
-                vector float t1, t2, t3, t4, t5, t6, t7, t8;
                 do {
                     C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
                     C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
@@ -1272,22 +1967,20 @@ class tinyBLAS_PPC {
             }
 
             if (cols & 4) {
-                vector float c1, c2, c3, c4;
-                vector float t1, t2, t3, t4;
-                c1 = vec_xl(0, aoffset1);
-                c2 = vec_xl(0, aoffset2);
-                c3 = vec_xl(0, aoffset3);
-                c4 = vec_xl(0, aoffset4);
+                c1[0] = vec_xl(0, aoffset1);
+                c2[0] = vec_xl(0, aoffset2);
+                c3[0] = vec_xl(0, aoffset3);
+                c4[0] = vec_xl(0, aoffset4);
 
-                t1 = vec_mergeh(c1, c2);
-                t2 = vec_mergeh(c3, c4);
+                t1 = vec_mergeh(c1[0], c2[0]);
+                t2 = vec_mergeh(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset);
                 vec_xst(t4, 0, boffset+4);
 
-                t1 = vec_mergel(c1, c2);
-                t2 = vec_mergel(c3, c4);
+                t1 = vec_mergel(c1[0], c2[0]);
+                t2 = vec_mergel(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset+8);
@@ -1299,21 +1992,19 @@ class tinyBLAS_PPC {
             aoffset2 = aoffset1 + lda;
             aoffset3 = aoffset2 + lda;
             if (cols & 4) {
-                vector float c1, c2, c3, c4 = {0};
-                vector float t1, t2, t3, t4;
-                c1 = vec_xl(0, aoffset1);
-                c2 = vec_xl(0, aoffset2);
-                c3 = vec_xl(0, aoffset3);
+                c1[0] = vec_xl(0, aoffset1);
+                c2[0] = vec_xl(0, aoffset2);
+                c3[0] = vec_xl(0, aoffset3);
 
-                t1 = vec_mergeh(c1, c2);
-                t2 = vec_mergeh(c3, c4);
+                t1 = vec_mergeh(c1[0], c2[0]);
+                t2 = vec_mergeh(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset);
                 vec_xst(t4, 0, boffset+4);
 
-                t1 = vec_mergel(c1, c2);
-                t2 = vec_mergel(c3, c4);
+                t1 = vec_mergel(c1[0], c2[0]);
+                t2 = vec_mergel(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset+8);
@@ -1321,14 +2012,13 @@ class tinyBLAS_PPC {
             }
         }
     }
-
     void KERNEL_4x4(int64_t ii, int64_t jj) {
         vec_t vec_A[4], vec_B[4], vec_C[4];
         acc_t acc_0;
         __builtin_mma_xxsetaccz(&acc_0);
         for (int l = 0; l < k; l+=4) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]);
@@ -1343,8 +2033,8 @@ class tinyBLAS_PPC {
         __builtin_mma_xxsetaccz(&acc_0);
         __builtin_mma_xxsetaccz(&acc_1);
         for (int64_t l = 0; l < k; l+=4) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 8, 4, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 8, 4, (TA*)vec_B);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], (vec_t)vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_1, vec_A[0], (vec_t)vec_B[1]);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], (vec_t)vec_B[2]);
@@ -1364,8 +2054,8 @@ class tinyBLAS_PPC {
         __builtin_mma_xxsetaccz(&acc_0);
         __builtin_mma_xxsetaccz(&acc_1);
         for (int64_t l = 0; l < k; l+=4) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 8, 4, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 8, 4, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
             __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[0], vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[1], vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[2], vec_B[1]);
@@ -1387,8 +2077,8 @@ class tinyBLAS_PPC {
         __builtin_mma_xxsetaccz(&acc_2);
         __builtin_mma_xxsetaccz(&acc_3);
         for (int l = 0; l < k; l+=8) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 8, 8, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 8, 8, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 8, 8, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 8, 8, (TA*)vec_B);
             for(int x = 0; x < 16; x+=2) {
                 __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[x], vec_B[x]);
                 __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[x], vec_B[x+1]);
@@ -1571,15 +2261,15 @@ class tinyBLAS_PPC {
             vec_t vec_A[4], vec_B[4];
             for (int l=0; l<k; l+=4) {
                 if (RN >= 4 && RM == 1) {
-                    float* a = const_cast(A+(ii)*lda+l);
-                    READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+                    TA* a = const_cast(A+(ii)*lda+l);
+                    packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
                     vec_A[0] = (vec_t)vec_xl(0,a);
-                    vec_A[1] = (vec_t)vec_splats(*((float*)&vec_A+1));
-                    vec_A[2] = (vec_t)vec_splats(*((float*)&vec_A+2));
-                    vec_A[3] = (vec_t)vec_splats(*((float*)&vec_A+3));
+                    vec_A[1] = (vec_t)vec_splats(*((TA*)&vec_A+1));
+                    vec_A[2] = (vec_t)vec_splats(*((TA*)&vec_A+2));
+                    vec_A[3] = (vec_t)vec_splats(*((TA*)&vec_A+3));
                 } else {
-                    READ_BLOCK(A+(ii*lda)+l, lda, RM, 4, (float*)vec_A);
-                    READ_BLOCK(B+(jj*ldb)+l, ldb, RN, 4, (float*)vec_B);
+                    packTranspose(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A);
+                    packTranspose(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B);
                 }
                 __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
                 __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
@@ -1589,7 +2279,7 @@ class tinyBLAS_PPC {
             __builtin_mma_disassemble_acc(vec_C, &acc_0);
             for (int I = 0; I < RM; I++) {
                 for (int J = 0; J < RN; J++) {
-                    *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J);
+                    *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
                 }
             }
        }
@@ -1812,6 +2502,20 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64
             params->ith, params->nth};
         tb.matmul(m, n);
         return true;
+
+#elif defined(__MMA__)
+        if (n < 8 && n != 4)
+           return false;
+        if (m < 8 && m != 4)
+           return false;
+        tinyBLAS_Q0_PPC tb{
+            k, (const block_q8_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+
 #else
         return false;
 #endif
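
To make the int8 MMA kernels above easier to follow, here is a scalar
reference of what one Q8_0 x Q8_0 block pair contributes to an output element.
This is an illustrative sketch, not code from the patch: it models the
XOR 0x80 flip applied by packNormal(), the integer accumulation performed by
xvi8ger4pp, and the -128 * row-sum ("comparray") correction applied by
compute() before the two block scales are multiplied in.

    #include <cstdint>

    // a, b: the 32 signed quants of one Q8_0 block of A and B; d_a, d_b: their scales.
    static float q8_block_dot(const int8_t a[32], float d_a,
                              const int8_t b[32], float d_b) {
        int32_t acc  = 0; // what the MMA accumulator ends up holding
        int32_t asum = 0; // row sum of A, i.e. comparray[i]
        for (int q = 0; q < 32; ++q) {
            acc  += (int32_t) a[q] * ((int32_t) b[q] + 128); // flipped B
            asum += a[q];
        }
        return d_a * d_b * (float) (acc - 128 * asum); // undo the +128 bias
    }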

From a3c1232c3f475f0a77b9cc5225516ac31c567a06 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Wed, 8 Jan 2025 12:55:36 +0200
Subject: [PATCH 67/81] arg : option to exclude arguments from specific
 examples (#11136)

* arg : option to exclude arguments from specific examples

ggml-ci

* readme : remove old args [no ci]
---
 common/arg.cpp            | 17 +++++++++++++----
 common/arg.h              |  3 +++
 examples/server/README.md |  3 ---
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index c81b15217..27886b84e 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -22,6 +22,11 @@ common_arg & common_arg::set_examples(std::initializer_list
     return *this;
 }
 
+common_arg & common_arg::set_excludes(std::initializer_list excludes) {
+    this->excludes = std::move(excludes);
+    return *this;
+}
+
 common_arg & common_arg::set_env(const char * env) {
     help = help + "\n(env: " + env + ")";
     this->env = env;
@@ -37,6 +42,10 @@ bool common_arg::in_example(enum llama_example ex) {
     return examples.find(ex) != examples.end();
 }
 
+bool common_arg::is_exclude(enum llama_example ex) {
+    return excludes.find(ex) != excludes.end();
+}
+
 bool common_arg::get_value_from_env(std::string & output) {
     if (env == nullptr) return false;
     char * value = std::getenv(env);
@@ -420,7 +429,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
      * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
      */
     auto add_opt = [&](common_arg arg) {
-        if (arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) {
+        if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
             ctx_arg.options.push_back(std::move(arg));
         }
     };
@@ -649,7 +658,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.prompt = value;
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--no-perf"},
         string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
@@ -673,7 +682,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.prompt.pop_back();
             }
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--in-file"}, "FNAME",
         "an input file (repeat to specify multiple files)",
@@ -700,7 +709,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.prompt = ss.str();
             fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-e", "--escape"},
         string_format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
diff --git a/common/arg.h b/common/arg.h
index a6700d323..49ab8667b 100644
--- a/common/arg.h
+++ b/common/arg.h
@@ -12,6 +12,7 @@
 
 struct common_arg {
     std::set<enum llama_example> examples = {LLAMA_EXAMPLE_COMMON};
+    std::set<enum llama_example> excludes = {};
     std::vector<std::string> args;
     const char * value_hint   = nullptr; // help text or example for arg value
     const char * value_hint_2 = nullptr; // for second arg value
@@ -53,9 +54,11 @@ struct common_arg {
     ) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}
 
     common_arg & set_examples(std::initializer_list<enum llama_example> examples);
+    common_arg & set_excludes(std::initializer_list<enum llama_example> excludes);
     common_arg & set_env(const char * env);
     common_arg & set_sparam();
     bool in_example(enum llama_example ex);
+    bool is_exclude(enum llama_example ex);
     bool get_value_from_env(std::string & output);
     bool has_value_from_env();
     std::string to_string();
diff --git a/examples/server/README.md b/examples/server/README.md
index 3ce16945a..1f0a27d96 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -45,10 +45,7 @@ The project is under active development, and we are [looking for feedback and co
 | `-ub, --ubatch-size N` | physical maximum batch size (default: 512)<br/>(env: LLAMA_ARG_UBATCH) |
 | `--keep N` | number of tokens to keep from the initial prompt (default: 0, -1 = all) |
 | `-fa, --flash-attn` | enable Flash Attention (default: disabled)<br/>(env: LLAMA_ARG_FLASH_ATTN) |
-| `-p, --prompt PROMPT` | prompt to start generation with |
 | `--no-perf` | disable internal libllama performance timings (default: false)<br/>(env: LLAMA_ARG_NO_PERF) |
-| `-f, --file FNAME` | a file containing the prompt (default: none) |
-| `-bf, --binary-file FNAME` | binary file containing the prompt (default: none) |
 | `-e, --escape` | process escapes sequences (\n, \r, \t, \', \", \\) (default: true) |
 | `--no-escape` | do not process escape sequences |
 | `--rope-scaling {none,linear,yarn}` | RoPE frequency scaling method, defaults to linear unless specified by the model
(env: LLAMA_ARG_ROPE_SCALING_TYPE) | From 80ccf5d725571035b454659e3c1b4b2b07b65e71 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Wed, 8 Jan 2025 12:07:20 +0100 Subject: [PATCH 68/81] ci : pin dependency to specific version (#11137) * ci : pin dependency to specific version * will this fix ec? --- .github/workflows/docker.yml | 2 +- .github/workflows/editorconfig.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 41f1a89ee..f5af72d0b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -100,7 +100,7 @@ jobs: # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example - name: Free Disk Space (Ubuntu) if: ${{ matrix.config.free_disk_space == true }} - uses: jlumbroso/free-disk-space@main + uses: jlumbroso/free-disk-space@v1.3.1 with: # this might remove tools that are actually needed, # if set to "true" but frees about 6 GB diff --git a/.github/workflows/editorconfig.yml b/.github/workflows/editorconfig.yml index ae86e9927..f02b7c219 100644 --- a/.github/workflows/editorconfig.yml +++ b/.github/workflows/editorconfig.yml @@ -23,5 +23,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: editorconfig-checker/action-editorconfig-checker@main + - uses: editorconfig-checker/action-editorconfig-checker@v2 + with: + version: v3.0.3 - run: editorconfig-checker From c792dcf4880461c2b5f3960584db241ac71a893a Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Sun, 5 Jan 2025 09:50:37 +0200 Subject: [PATCH 69/81] ggml : allow loading backend with env variable (ggml/1059) ref: #1058 --- ggml/src/ggml-backend-reg.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 7ddd178b5..955ed505f 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -574,4 +574,9 @@ void ggml_backend_load_all_from_path(const char * dir_path) { ggml_backend_load_best("opencl", silent, dir_path); ggml_backend_load_best("musa", silent, dir_path); ggml_backend_load_best("cpu", silent, dir_path); + // check the environment variable GGML_BACKEND_PATH to load an out-of-tree backend + const char * backend_path = std::getenv("GGML_BACKEND_PATH"); + if (backend_path) { + ggml_backend_load(backend_path); + } } From 99a3755a3c518119d0156766122f7b4b796ea576 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 8 Jan 2025 13:40:30 +0200 Subject: [PATCH 70/81] sync : ggml --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index b67445ecd..a0921f1a9 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -a2af72be7baf5b1f4a33d34e77e509e5e85b7cd7 +c8bd0fee71dc8328d93be301bbee06bc10d30429 From c07d437bbd417f42b122e767ad42b3298767dca0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 8 Jan 2025 16:19:36 +0200 Subject: [PATCH 71/81] llama : avoid hardcoded QK_K (#11061) ggml-ci --- src/llama-quant.cpp | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 038cf58dd..466e7bc61 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -7,14 +7,12 @@ #include #include #include +#include #include #include #include #include -// TODO: replace with ggml API call -#define QK_K 256 - static void zeros(std::ofstream & file, size_t n) { char zero = 0; for 
(size_t i = 0; i < n; ++i) { @@ -154,8 +152,10 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t if (qs.params->output_tensor_type < GGML_TYPE_COUNT) { new_type = qs.params->output_tensor_type; } else { - int nx = tensor->ne[0]; - if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) { + const int64_t nx = tensor->ne[0]; + const int64_t qk_k = ggml_blck_size(new_type); + + if (arch == LLM_ARCH_FALCON || nx % qk_k != 0) { new_type = GGML_TYPE_Q8_0; } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || @@ -367,20 +367,19 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K; //} bool convert_incompatible_tensor = false; - if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K || - new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K || new_type == GGML_TYPE_IQ4_XS || - new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S || - new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S || new_type == GGML_TYPE_IQ3_S || - new_type == GGML_TYPE_IQ1_M) { - int nx = tensor->ne[0]; - int ny = tensor->ne[1]; - if (nx % QK_K != 0) { - LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type)); + { + const int64_t nx = tensor->ne[0]; + const int64_t ny = tensor->ne[1]; + const int64_t qk_k = ggml_blck_size(new_type); + + if (nx % qk_k != 0) { + LLAMA_LOG_WARN("\n\n%s : tensor cols %" PRId64 " x %" PRId64 " are not divisible by %" PRId64 ", required for %s", __func__, nx, ny, qk_k, ggml_type_name(new_type)); convert_incompatible_tensor = true; } else { ++qs.n_k_quantized; } } + if (convert_incompatible_tensor) { switch (new_type) { case GGML_TYPE_TQ1_0: From 4d2b3d88041705b20c30b3219838aa435e7ffbde Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Wed, 8 Jan 2025 15:59:53 +0100 Subject: [PATCH 72/81] lora : improve compat with `mergekit-extract-lora` (#11131) * (wip) support mergekit-extracted lora * support mergekit-extract-lora * use lora->get_scale * correct comment * correct norm name & condition * add some hints --- convert_lora_to_gguf.py | 34 +++++++++++++++++++++++++++++++--- src/llama-adapter.cpp | 24 ++++++++++++++++++------ src/llama-adapter.h | 7 +++++++ src/llama.cpp | 21 ++++++++++++++++++--- 4 files changed, 74 insertions(+), 12 deletions(-) diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py index ed1014cae..6dea14a23 100755 --- a/convert_lora_to_gguf.py +++ b/convert_lora_to_gguf.py @@ -226,6 +226,9 @@ def get_base_tensor_name(lora_tensor_name: str) -> str: base_name = lora_tensor_name.replace("base_model.model.", "") base_name = base_name.replace(".lora_A.weight", ".weight") base_name = base_name.replace(".lora_B.weight", ".weight") + # models produced by mergekit-extract-lora have token embeddings in the adapter + base_name = base_name.replace(".lora_embedding_A", ".weight") + base_name = base_name.replace(".lora_embedding_B", ".weight") return base_name @@ -260,6 +263,10 @@ def parse_args() -> argparse.Namespace: "--base", type=Path, help="directory containing Hugging Face model config files (config.json, tokenizer.json) for the base model that the adapter is based on - only config is needed, actual model weights are not required. 
If base model is unspecified, it will be loaded from Hugging Face hub based on the adapter config", ) + parser.add_argument( + "--base-model-id", type=str, + help="the model ID of the base model, if it is not available locally or in the adapter config. If specified, it will ignore --base and load the base model config from the Hugging Face hub (Example: 'meta-llama/Llama-3.2-1B-Instruct')", + ) parser.add_argument( "lora_path", type=Path, help="directory containing Hugging Face PEFT LoRA config (adapter_model.json) and weights (adapter_model.safetensors or adapter_model.bin)", @@ -290,6 +297,7 @@ if __name__ == '__main__': dir_base_model: Path | None = args.base dir_lora: Path = args.lora_path + base_model_id: str | None = args.base_model_id lora_config = dir_lora / "adapter_config.json" input_model = dir_lora / "adapter_model.safetensors" @@ -313,7 +321,10 @@ if __name__ == '__main__': lparams: dict[str, Any] = json.load(f) # load base model - if dir_base_model is None: + if base_model_id is not None: + logger.info(f"Loading base model from Hugging Face: {base_model_id}") + hparams = load_hparams_from_hf(base_model_id) + elif dir_base_model is None: if "base_model_name_or_path" in lparams: model_id = lparams["base_model_name_or_path"] logger.info(f"Loading base model from Hugging Face: {model_id}") @@ -371,11 +382,16 @@ if __name__ == '__main__': if self.lazy: tensor = LazyTorchTensor.from_eager(tensor) base_name = get_base_tensor_name(name) - is_lora_a = ".lora_A.weight" in name - is_lora_b = ".lora_B.weight" in name + # note: mergekit-extract-lora also adds token embeddings to the adapter + is_lora_a = ".lora_A.weight" in name or ".lora_embedding_A" in name + is_lora_b = ".lora_B.weight" in name or ".lora_embedding_B" in name if not is_lora_a and not is_lora_b: if ".base_layer.weight" in name: continue + # mergekit-extract-lora add these layernorm to the adapter, we need to keep them + if "_layernorm" in name or ".norm" in name: + yield (base_name, tensor) + continue logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor") if ".embed_tokens.weight" in name or ".lm_head.weight" in name: logger.error("Embeddings is present in the adapter. 
This can be due to new tokens added during fine tuning") @@ -407,9 +423,21 @@ if __name__ == '__main__': if name == "lm_head.weight" and len(dest) == 0: raise ValueError("lm_head is present in adapter, but is ignored in base model") for dest_name, dest_data in dest: + # mergekit-extract-lora add these layernorm to the adapter + if "_norm" in dest_name: + assert dest_data.dim() == 1 + yield (dest_name, dest_data) + continue + + # otherwise, we must get the lora_A and lora_B tensors assert isinstance(dest_data, LoraTorchTensor) lora_a, lora_b = dest_data.get_lora_A_B() + # note: mergekit-extract-lora flip and transpose A and B + # here we only need to transpose token_embd.lora_a, see llm_build_inp_embd() + if "token_embd.weight" in dest_name: + lora_a = lora_a.T + yield (dest_name + ".lora_a", lora_a) yield (dest_name + ".lora_b", lora_b) diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp index 9fd7edea3..d4879b778 100644 --- a/src/llama-adapter.cpp +++ b/src/llama-adapter.cpp @@ -242,6 +242,10 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char } else { ab_map[name].b = cur; } + } else if (str_endswith(name, "_norm.weight")) { + // TODO: add support for norm vector + // for now, we don't really care because most adapters still work fine without it + continue; } else { throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix"); } @@ -251,6 +255,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char for (auto & it : ab_map) { const std::string & name = it.first; llama_lora_weight & w = it.second; + bool is_token_embd = str_endswith(name, "token_embd.weight"); if (!w.a || !w.b) { throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component"); @@ -259,16 +264,23 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char // device buft and device ctx auto * model_tensor = llama_model_get_tensor(model, name.c_str()); if (!model_tensor) { - throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model"); + throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)"); } struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer)); // validate tensor shape - if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) { - throw std::runtime_error("tensor '" + name + "' has incorrect shape"); - } - if (w.a->ne[1] != w.b->ne[0]) { - throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)"); + if (is_token_embd) { + // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd() + if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) { + throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)"); + } + } else { + if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) { + throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)"); + } + if (w.a->ne[1] != w.b->ne[0]) { + throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)"); + } } // save tensor to adapter diff --git a/src/llama-adapter.h b/src/llama-adapter.h index 5f1870cc8..3448656b1 100644 --- a/src/llama-adapter.h +++ b/src/llama-adapter.h @@ -45,6 +45,13 @@ struct llama_lora_weight { struct ggml_tensor * a = 
nullptr; struct ggml_tensor * b = nullptr; + // get actual scale based on rank and alpha + float get_scale(float alpha, float adapter_scale) { + const float rank = (float) b->ne[0]; + const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale; + return scale; + } + llama_lora_weight() = default; llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} }; diff --git a/src/llama.cpp b/src/llama.cpp index 8ea6686c9..97e716cd6 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -2545,6 +2545,21 @@ static struct ggml_tensor * llm_build_inp_embd( ggml_set_input(lctx.inp_tokens); inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens); + + // apply lora for embedding tokens if needed + for (auto & it : lctx.lora_adapters) { + struct llama_lora_weight * lora = it.first->get_weight(tok_embd); + if (lora == nullptr) { + continue; + } + const float adapter_scale = it.second; + const float scale = lora->get_scale(it.first->alpha, adapter_scale); + struct ggml_tensor * inpL_delta = ggml_scale(ctx, ggml_mul_mat( + ctx, lora->b, // non-transposed lora_b + ggml_get_rows(ctx, lora->a, lctx.inp_tokens) + ), scale); + inpL = ggml_add(ctx, inpL, inpL_delta); + } } else { lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, ubatch.n_tokens); inpL = lctx.inp_embd; @@ -2617,9 +2632,8 @@ static struct ggml_tensor * llm_build_lora_mm( if (lora == nullptr) { continue; } - const float alpha = it.first->alpha; - const float rank = (float) lora->b->ne[0]; - const float scale = alpha ? it.second * alpha / rank : it.second; + const float adapter_scale = it.second; + const float scale = lora->get_scale(it.first->alpha, adapter_scale); struct ggml_tensor * ab_cur = ggml_mul_mat( ctx0, lora->b, ggml_mul_mat(ctx0, lora->a, cur) @@ -3967,6 +3981,7 @@ struct llm_build_context { // feed-forward network if (model.layers[il].ffn_gate_inp == nullptr) { + cur = llm_build_norm(ctx0, ffn_inp, hparams, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, cb, il); From f7cd13301c2a88f97073fd119072b4cc92c08df1 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Wed, 8 Jan 2025 16:09:20 +0100 Subject: [PATCH 73/81] ci : use actions from ggml-org (#11140) --- .github/workflows/build.yml | 2 +- .github/workflows/docker.yml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 02a193b86..c85999b89 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1237,7 +1237,7 @@ jobs: - name: Create release id: create_release - uses: anzz1/action-create-release@v1 + uses: ggml-org/action-create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f5af72d0b..d71f1eb38 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -97,10 +97,9 @@ jobs: GITHUB_BRANCH_NAME: ${{ github.head_ref || github.ref_name }} GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}' - # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example - name: Free Disk Space (Ubuntu) if: ${{ matrix.config.free_disk_space == true }} - uses: jlumbroso/free-disk-space@v1.3.1 + uses: ggml-org/free-disk-space@v1.3.1 with: # this might remove tools that are actually needed, # if set to "true" but frees about 6 GB From 1bf839b1e8b9d043306c65eddd9021fe4337733e Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Wed, 8 Jan 2025 18:47:05 +0000 Subject: [PATCH 74/81] Enhance user input handling for 
llama-run (#11138) The main motivation for this change is it was not handing ctrl-c/ctrl-d correctly. Modify `read_user_input` to handle EOF, "/bye" command, and empty input cases. Introduce `get_user_input` function to manage user input loop and handle different return cases. Signed-off-by: Eric Curtin --- examples/run/run.cpp | 63 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 2888fcfed..61420e441 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -11,6 +11,8 @@ # include #endif +#include + #include #include #include @@ -25,6 +27,13 @@ #include "json.hpp" #include "llama-cpp.h" +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32) +[[noreturn]] static void sigint_handler(int) { + printf("\n"); + exit(0); // not ideal, but it's the only way to guarantee exit in all cases +} +#endif + GGML_ATTRIBUTE_FORMAT(1, 2) static std::string fmt(const char * fmt, ...) { va_list ap; @@ -801,7 +810,20 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str static int read_user_input(std::string & user) { std::getline(std::cin, user); - return user.empty(); // Should have data in happy path + if (std::cin.eof()) { + printf("\n"); + return 1; + } + + if (user == "/bye") { + return 1; + } + + if (user.empty()) { + return 2; + } + + return 0; // Should have data in happy path } // Function to generate a response based on the prompt @@ -868,7 +890,25 @@ static bool is_stdout_a_terminal() { #endif } -// Function to tokenize the prompt +// Function to handle user input +static int get_user_input(std::string & user_input, const std::string & user) { + while (true) { + const int ret = handle_user_input(user_input, user); + if (ret == 1) { + return 1; + } + + if (ret == 2) { + continue; + } + + break; + } + + return 0; +} + +// Main chat loop function static int chat_loop(LlamaData & llama_data, const std::string & user) { int prev_len = 0; llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get())); @@ -876,7 +916,8 @@ static int chat_loop(LlamaData & llama_data, const std::string & user) { while (true) { // Get user input std::string user_input; - while (handle_user_input(user_input, user)) { + if (get_user_input(user_input, user) == 1) { + return 0; } add_message("user", user.empty() ? user_input : user, llama_data); @@ -917,7 +958,23 @@ static std::string read_pipe_data() { return result.str(); } +static void ctrl_c_handling() { +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = sigint_handler; + sigemptyset(&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); +#elif defined(_WIN32) + auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL { + return (ctrl_type == CTRL_C_EVENT) ? 
(sigint_handler(SIGINT), true) : false; + }; + SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); +#endif +} + int main(int argc, const char ** argv) { + ctrl_c_handling(); Opt opt; const int ret = opt.init(argc, argv); if (ret == 2) { From 8a1d9c25fafbaf4182dd0b785dd6303ee40d55bc Mon Sep 17 00:00:00 2001 From: Vinesh Janarthanan <36610342+VJHack@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:54:58 -0600 Subject: [PATCH 75/81] gguf-py : move scripts directory (#11116) * Moved scripts dir and fixed pyproject.toml * updated readme * fixed README urls * bump pypi gguf to v0.14.0 * retrigger ci * empty commit - trigger ci --- gguf-py/README.md | 8 ++++---- gguf-py/{ => gguf}/scripts/__init__.py | 0 gguf-py/{ => gguf}/scripts/gguf_convert_endian.py | 0 gguf-py/{ => gguf}/scripts/gguf_dump.py | 0 gguf-py/{ => gguf}/scripts/gguf_hash.py | 0 gguf-py/{ => gguf}/scripts/gguf_new_metadata.py | 0 gguf-py/{ => gguf}/scripts/gguf_set_metadata.py | 0 gguf-py/pyproject.toml | 11 +++++------ 8 files changed, 9 insertions(+), 10 deletions(-) rename gguf-py/{ => gguf}/scripts/__init__.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_convert_endian.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_dump.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_hash.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_new_metadata.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_set_metadata.py (100%) diff --git a/gguf-py/README.md b/gguf-py/README.md index 24af96a17..37a75923b 100644 --- a/gguf-py/README.md +++ b/gguf-py/README.md @@ -15,13 +15,13 @@ pip install gguf [examples/writer.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/examples/writer.py) — Generates `example.gguf` in the current directory to demonstrate generating a GGUF file. Note that this file cannot be used as a model. -[scripts/gguf_dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_dump.py) — Dumps a GGUF file's metadata to the console. +[gguf/scripts/gguf_dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_dump.py) — Dumps a GGUF file's metadata to the console. -[scripts/gguf_set_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_set_metadata.py) — Allows changing simple metadata values in a GGUF file by key. +[gguf/scripts/gguf_set_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_set_metadata.py) — Allows changing simple metadata values in a GGUF file by key. -[scripts/gguf_convert_endian.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_convert_endian.py) — Allows converting the endianness of GGUF files. +[gguf/scripts/gguf_convert_endian.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_convert_endian.py) — Allows converting the endianness of GGUF files. -[scripts/gguf_new_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_new_metadata.py) — Copies a GGUF file with added/modified/removed metadata values. +[gguf/scripts/gguf_new_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_new_metadata.py) — Copies a GGUF file with added/modified/removed metadata values. 
## Development Maintainers who participate in development of this package are advised to install it in editable mode: diff --git a/gguf-py/scripts/__init__.py b/gguf-py/gguf/scripts/__init__.py similarity index 100% rename from gguf-py/scripts/__init__.py rename to gguf-py/gguf/scripts/__init__.py diff --git a/gguf-py/scripts/gguf_convert_endian.py b/gguf-py/gguf/scripts/gguf_convert_endian.py similarity index 100% rename from gguf-py/scripts/gguf_convert_endian.py rename to gguf-py/gguf/scripts/gguf_convert_endian.py diff --git a/gguf-py/scripts/gguf_dump.py b/gguf-py/gguf/scripts/gguf_dump.py similarity index 100% rename from gguf-py/scripts/gguf_dump.py rename to gguf-py/gguf/scripts/gguf_dump.py diff --git a/gguf-py/scripts/gguf_hash.py b/gguf-py/gguf/scripts/gguf_hash.py similarity index 100% rename from gguf-py/scripts/gguf_hash.py rename to gguf-py/gguf/scripts/gguf_hash.py diff --git a/gguf-py/scripts/gguf_new_metadata.py b/gguf-py/gguf/scripts/gguf_new_metadata.py similarity index 100% rename from gguf-py/scripts/gguf_new_metadata.py rename to gguf-py/gguf/scripts/gguf_new_metadata.py diff --git a/gguf-py/scripts/gguf_set_metadata.py b/gguf-py/gguf/scripts/gguf_set_metadata.py similarity index 100% rename from gguf-py/scripts/gguf_set_metadata.py rename to gguf-py/gguf/scripts/gguf_set_metadata.py diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index 9c3956256..92d7f22ec 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,12 +1,11 @@ [tool.poetry] name = "gguf" -version = "0.13.0" +version = "0.14.0" description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ {include = "gguf"}, {include = "gguf/py.typed"}, - {include = "scripts"}, ] readme = "README.md" homepage = "https://ggml.ai" @@ -33,7 +32,7 @@ requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.scripts] -gguf-convert-endian = "scripts:gguf_convert_endian_entrypoint" -gguf-dump = "scripts:gguf_dump_entrypoint" -gguf-set-metadata = "scripts:gguf_set_metadata_entrypoint" -gguf-new-metadata = "scripts:gguf_new_metadata_entrypoint" +gguf-convert-endian = "gguf.scripts:gguf_convert_endian_entrypoint" +gguf-dump = "gguf.scripts:gguf_dump_entrypoint" +gguf-set-metadata = "gguf.scripts:gguf_set_metadata_entrypoint" +gguf-new-metadata = "gguf.scripts:gguf_new_metadata_entrypoint" From 8d59d911711b8f1ba9ec57c4b192ccd2628af033 Mon Sep 17 00:00:00 2001 From: hydai Date: Thu, 9 Jan 2025 04:03:28 +0800 Subject: [PATCH 76/81] fix: add missing msg in static_assert (#11143) Signed-off-by: hydai --- ggml/src/ggml-cuda/concat.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/concat.cu b/ggml/src/ggml-cuda/concat.cu index 2f42b8a95..aafbaf803 100644 --- a/ggml/src/ggml-cuda/concat.cu +++ b/ggml/src/ggml-cuda/concat.cu @@ -124,7 +124,7 @@ static __global__ void __launch_bounds__(CUDA_CONCAT_BLOCK_SIZE) uint64_t nb1, uint64_t nb2, uint64_t nb3){ - static_assert(dim >= 0 && dim <= 3); + static_assert(dim >= 0 && dim <= 3, "dim must be in [0, 3]"); const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; From d9feae1c06321aac9662fd4b4249452dccaec553 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Thu, 9 Jan 2025 10:07:33 +0100 Subject: [PATCH 77/81] llama-chat : add phi 4 template (#11148) --- src/llama-chat.cpp | 13 ++++++++++++- src/llama-chat.h | 1 + tests/test-chat-template.cpp | 6 +++++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 
44670d3d8..1347ec156 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -35,6 +35,7 @@ static const std::map LLM_CHAT_TEMPLATES = { { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, { "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, + { "phi4", LLM_CHAT_TEMPLATE_PHI_4 }, { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 }, { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, { "monarch", LLM_CHAT_TEMPLATE_MONARCH }, @@ -73,7 +74,9 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { return tmpl.find(haystack) != std::string::npos; }; if (tmpl_contains("<|im_start|>")) { - return LLM_CHAT_TEMPLATE_CHATML; + return tmpl_contains("<|im_sep|>") + ? LLM_CHAT_TEMPLATE_PHI_4 + : LLM_CHAT_TEMPLATE_CHATML; } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) { if (tmpl_contains("[SYSTEM_PROMPT]")) { return LLM_CHAT_TEMPLATE_MISTRAL_V7; @@ -269,6 +272,14 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "<|assistant|>\n"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_4) { + // chatml template + for (auto message : chat) { + ss << "<|im_start|>" << message->role << "<|im_sep|>" << message->content << "<|im_end|>"; + } + if (add_ass) { + ss << "<|im_start|>assistant<|im_sep|>"; + } } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) { // Falcon 3 for (auto message : chat) { diff --git a/src/llama-chat.h b/src/llama-chat.h index b8e94d9ef..3a4d07ce3 100644 --- a/src/llama-chat.h +++ b/src/llama-chat.h @@ -15,6 +15,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, LLM_CHAT_TEMPLATE_MISTRAL_V7, LLM_CHAT_TEMPLATE_PHI_3, + LLM_CHAT_TEMPLATE_PHI_4, LLM_CHAT_TEMPLATE_FALCON_3, LLM_CHAT_TEMPLATE_ZEPHYR, LLM_CHAT_TEMPLATE_MONARCH, diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index 51bfb155b..f1f9aec4d 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -78,7 +78,9 @@ int main(void) { // ai-sage/GigaChat-20B-A3B-instruct "{% if messages[0]['role'] == 'system' -%}\n {%- set loop_messages = messages[1:] -%}\n {%- set system_message = bos_token + messages[0]['content'] + additional_special_tokens[1] -%}\n{%- else -%}\n {%- set loop_messages = messages -%}\n {%- set system_message = bos_token + '' -%}\n{%- endif -%}\n{%- for message in loop_messages %}\n {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {% endif %}\n \n {%- if loop.index0 == 0 -%}\n {{ system_message -}}\n {%- endif -%}\n {%- if message['role'] == 'user' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {{ 'available functions' + additional_special_tokens[0] + additional_special_tokens[2] + additional_special_tokens[3] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if message['role'] == 'assistant' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if loop.last and add_generation_prompt -%}\n {{ 'assistant' + additional_special_tokens[0] -}}\n {%- endif -%}\n{%- endfor %}", // Infinigence/Megrez-3B-Instruct - u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ 
'<|role_start|>assistant<|role_end|>' }}{% endif %}" + u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}", + // phi-4 + "{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|im_start|>system<|im_sep|>' + message['content'] + '<|im_end|>'}}{% elif (message['role'] == 'user') %}{{'<|im_start|>user<|im_sep|>' + message['content'] + '<|im_end|><|im_start|>assistant<|im_sep|>'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>'}}{% endif %}{% endfor %}", }; std::vector expected_output = { // teknium/OpenHermes-2.5-Mistral-7B @@ -137,6 +139,8 @@ int main(void) { "You are a helpful assistant<|message_sep|>user<|role_sep|>Hello<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>Hi there<|message_sep|>user<|role_sep|>Who are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|> I am an assistant <|message_sep|>user<|role_sep|>Another question<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>", // Infinigence/Megrez-3B-Instruct "<|role_start|>system<|role_end|>You are a helpful assistant<|turn_end|><|role_start|>user<|role_end|>Hello<|turn_end|><|role_start|>assistant<|role_end|>Hi there<|turn_end|><|role_start|>user<|role_end|>Who are you<|turn_end|><|role_start|>assistant<|role_end|> I am an assistant <|turn_end|><|role_start|>user<|role_end|>Another question<|turn_end|><|role_start|>assistant<|role_end|>", + // phi-4 + "<|im_start|>system<|im_sep|>You are a helpful assistant<|im_end|><|im_start|>user<|im_sep|>Hello<|im_end|><|im_start|>assistant<|im_sep|>Hi there<|im_end|><|im_start|>user<|im_sep|>Who are you<|im_end|><|im_start|>assistant<|im_sep|> I am an assistant <|im_end|><|im_start|>user<|im_sep|>Another question<|im_end|><|im_start|>assistant<|im_sep|>", }; std::vector formatted_chat(1024); int32_t res; From be0e950c91cde2d8488ae32162b549d7023482f0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 9 Jan 2025 11:15:15 +0200 Subject: [PATCH 78/81] media : remove old img [no ci] --- media/llama-leader.jpeg | Bin 199945 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 media/llama-leader.jpeg diff --git a/media/llama-leader.jpeg b/media/llama-leader.jpeg deleted file mode 100644 index 0b4e6e1cfbd442f1d945f90d5d668e19252ccffd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 199945 zcmb5VXIK+k8!k+BBcK9GN2CO#gH#DcdNoK(NC>?XngRg{f}nuX5lE1#lu!~v4@hXD z(tGc)ReIf+fP$hYzTdgd|8M5UTv==8nftld)3Ro*Kl6Y7rC~EOGBu*1rKP2Dp`J8< zmT2^8nE(0b@;{fDFEd?cVPR&y#>;l?>eXw4T-+SI03mTPQ6W(g30ZXo2`N=+5m7}0 zMO6(g9UUEU1tW7qZ8Pg(9qHUMf?8_S~?nf2F6SOP&;|49ed>bBu6FuTC|_aFW62t)UbBo z^eYLBBZ`kJjCNnym;%C*pvqj3Ef#uF1^*A-jhVWmt}m>4RsagS9AYW`E23;DFJ3j) zO9KpvG$Wa;<3c@2AAy!%z;?U<$XVYvT5;>acCBc~YJc>-BOQ6(%sj;pr6=LS|KcRGed_FZYd>FVnP|%cfdm8s_=zgJnGP zJdtFLm(5@s&WN8_te+@Zl4wwdnSXYz#xvDNkA`h`i4S78+6Dr;_=lRlCghRJnx`BC zND`y-lD+fK3%Raj`^;AlE-2wmhWWfWLeOp=47sme8#s{k`qdx#EqRa`rsF(F8Xp-S z85bE>W&y8C*F4-;pw1WDt71A|eCaEb$iPZ_HNR_W*6PbR`+PJQ88(f<5(y<$@X0E; zjTPsd75$ZNrd9=zm#Lg7yO2nK8I&D^h6*t_Wbr|dXQ6@&u8eraRUe;b$oTm;M9$l3 zM<$G2zWqE`(*BsTl~p-73&iE^KlrHJ(|`7nmtnF=4ZlctBa2bS9hnd2LYe)5c|MhX 
zz#*N*Tfi=pS6LWa}D&FbE^DBbb>3$vVfb%6=XrnfbwTA;fmL z;bmN}R0YW1+uZsStBedmHpj*nyVr6$DbG4{tLco`@W*kAV1I+H5 zR~&+xQNh&6y=4zgX3W${v4(DlVe;JUD|H>w^KX-1>tYUgLq~jDgFeJHzmlXrCSz{P z^LBvY85WI=lxmP~)0(&6JuBB|pU5tMkpe0&*zS0c?>D*QOUU@BD1()e3S>=r^?B#D z>rNumIIm)+9=pVV3Lt|Dn29wO00Rut3tghar}%PprwY~S{bPi;bD$6$#W8K{WyZA- z8p*}-Sx{JcqaF-gS&fOkRF>fn6@;mA+=?gDfj@A$7fKlgEa$@Vp%2r0wj7`cOge0SZwy${ z#L>DN#=e^HZv%}9KL@D{Jh2b(U*ra{TfZyghRK0*9L&Rp~J z8>!H5(qoMsicJw_f=P;Hwtw3%Aj?e{Zg+QEBGKUWrt(HPuGw#2N#6$B)r2$+WEoS1 zc1{CQ?XbU8h;Y+P{5rKid;GG1&vL=g?!9>B9Ec182xRF9($H zA|?-oS)~FidpjJK8ulEA+GG>ac2Ubq?&Zt>hVH$eKq#6!Me%OvCC#R4o@#JCI`V!p z=3(D|%57xy4Eo;0ErsJs29J!Ws;;8Ul~glTX<%oW!lCKavP=XoBhM96VQOIGFwq4t zAbTnY?@T#-Y=LDYGjPC!=}1dp$W-BMs>5^=*Nkb1G{RR(1@S4ll1q&9S=prz+dHma z-YO_R9M?MTqilx0N6Z3K9)?~$KvaiEZ+mia`;1FfuZ@Z6gmv0*N+~y}Q8M)?_G>_B zE|?jw;@_;;32`*f3+O}m`!_qRu$3AzKC9;uc$Vvx1-hO>Dle!izil1UgtnR0KkC!MJK3kmi^O4~c!9u0; zQb9vrww(lNJ6(M<8?gZojMG_?k1d9I!Iu-$4FF_r>H{~53xLl&^q5Q(V9fK=uiLFz zFqQVczJU$N4Wx7ACUy=krlfnvd(Uva3zp`vQ|1En8-4YWJx2D)rg^+fO?^q5I}~P! zq2|j;hsl}kt&-2AkOYt^pwgp(4Pkm&h?OCe?yvb~S~0KRm`kmR-03r8!1>|5uMwBB z^|FaDqflaA`Lmv83A1F5Jiv4Wv^De@+8-_r=XwS6=U@@Gg!-uht4+#mFTnt+di>=o zemlNo%9pu?3cmH!8QvVLYV&b@rj=P7y zKT&O}(32<$!>5X?EF_Mwk`}N{&5e0WS4&tzMA5aX>&9r`i8p-B%cN?r7ZvJ&65%R< zDV3pOHW584{FeZj&`ajQN}HnDWa&OL1gT&*OLFC~<$CE-eCx?(4sdY_*4xu~GZIo~ zM@kNno$-ZAyCzExDjHJLTrkXRz9|-)%p0e=5`qabSG+lU`!416W|wL8p2n?vW|Fh` zba}_CnmfV?OVId3$#?zIt`dBvX`Rms7SHkW{PH0J?Ppb7&0Sc5#+g1|j`JB|j`|u| zg1J%Qb{@I0FTpZhBe( zn}SXvmx)j!O^On3@>4I)oLi$Le`mA`w@buL_UzrN-%4xTPIIjB!6Zwm-U0Jxf89Fp zbOJ@$pF})=o0!;C^HL0QqKX9!A9qoKBw;K$?^~EH0lL@MYW%`M{!Naop$>hbe?Qf zcwU7cvBd4YvUueN3xN2kS}*f0&5-%;)O$tNPo||bf1cN__qbA>B{q<0OZC|Q>$ucQ zOvd{RTBuhEv=JMxly9I!;sc@vC5NRo+sBo5H6VoY#0<0rC(5!Bm>negD=!ugDLgL;)TE2 zk^ss~!f`RLX>@2AcF6Z5S$@^xHd%a!WZ~VTQ^97(p0Q%q4HB5%D$wo1g!z(z<__ge z*#kG(hF)_*-hjfc9nh65& zhhM*X&#bochFUs7d*|Vvxi`VSnSER^om0hPX@#2o#LW!x1>WHu0bkSeB0crB_~N-@)a;6B{i5ad`z!K<*>m3#{*06w1@l zYgp8cH{rRhR&{QcBQiG>XC;Tg6<#Uy%JcA!td{a4Bv~}416}t3e=B2m=KBmjk}}en zCZBX&lf(w?1Z0D|&Fu9Eb*4c*$d~=3U5BcU6nQLOVK(uF8rXlmXlCZq*wj~?Zyt6;_4TLo&zQZ}FjviD zB5AYn5hk|ehIz3`t6A=R6(P9PUpnWG`Mg_MhZ*FDrLtaP$PNBghmnYsMvs)ENb{m4 zKI!HmVSg{qrmiuWI#g~m0CCR?eTU3Ix8bjf)|~J7zZNs&rBZ&$ z`bFk!bFsZW9{N~ho~zzPs@Bj8fh8Q5j@h3y|P$4 zF8Q+Uq=4}=;_4~0zgTcTx{t=?fx0_<@^WOaRh`kDdOr^TYV3Yp*&4HdIQyZ`Fv+qw zRIkm5uYQ}`QrX?kc#aj>rteh!>mCAluLmjgNKHPg}ER(_q!-p40OoyofzjXr*7vB zxj6Nsp1nEn>kaE=tlkuuCC=6X$1P&c3LYO|sRG^6^)>5xYo8_=7gu$Y8E9e(c$>k) z%qzwVFr{&!Ws|g~|Jv~sJynnyWSMNyQZg2<*=qOFkxD-gogTf^}TqffKcnpY;!nm?zHmujc7=k(I#S++fnOqGOV)~g}0C9>O zY&^oW>&ZN{&!-gIOiif{Y z?*5#Olx%@b-pLL18!PI-6g{xZO;KbTw>A|P64!|vH_pI_GV`&<8w)+-p>fd_laZ0| zL&!pf&`BT*{oTrZKdclw zhjTYv7jF=LmhjAr)YHEh`+AiquJw!%gS*St&K@LYy-sgGVemFoA@2*G|K5T0*JP*tf`_QLnN(&GYijd*pWR2tCVN0U?DC)5pap*uqG@sk}-V+oVFeeIj-8>OW!5?bx*7>)s#A>ojBE|EvE86JWBI5b;$squ20B6t=~ux ztbb^88$@n~WcRlPuU${iigt2Jdtoj<+Rk@r)mV@SlglDmsv~Sg`o=V9K zh78%qWCHJhbI66Iji0QCy}zL+VG~iqmuMPNi+(7(((%yK(tF?4JS@_fP@Z~AkrQ;` zL6~@CN+_M}1J`Pv3cctMbf5Sl`vaer@&cCRqHs_O{I-%}TC`fI>^UcB=C$ozYyV!Y zJ5QGL3N4};{ISC>c6i5oWAAQ<{Iy!asE1)+Ii}@|U6sGgw!W3Dh>nnBO!h1ek|3qB zM5jH_mkPI&{jvht z;~J#X16EF$&@;2W&s(J(ebk24EYy9&ZFZC$sRk%t$j{OK#dXdfPx?~nO8d+TXWp}Y z`*Igtoe#RqqzfX-Uzf}C)(t>iCF7lfjSZjulf3wC&S9s7u$T{N+6;f(*19*&47{7| zyX8L{SerUa_c6cMTxMgfShnIx#3E!ExtfGJUO&|w3%M!ZC3x^{^ps%7>q%~;a^w(l=;=2!>x;3S&gwPaaVnxO-71x#PdB#(EQ;PuNkI1CWYBGQwQjv3+@uap zzOm};-k8|C`mKuMF!jD$(H03PPU)#azFUGSIO2ULuOwv+UNC&x$vxq<9;n8HcKCO z8H3Wn5VlrL=h|;1%VzPDb5zUU*E00Ih56RP+upnI}LkUB6 z9&5exLe0aL=QTP1RESD8)efDH(heW~r;$oV3H*L>%_(~vxtUYHrtmUzsgUgbSQfq& 
zr4bQz^S?}w;nLyrTFJfl`@zbH)9s-*FT!%$qSQ}s<`hW|O;iBM*+&C4rJN_<|6T3t zIZsgtdx*?Rd|o_;Se^T)xezANz81Uv`1+HC);6X2ZGWuD4lf}-l2tXD-b>stm@TXuajeZY&&=Upv%?QA6mhNJ@YcgOscqOyLUiuT9ayl9>CFp?<=5sksfWksWXdwmA<{gQh}|R^Y*k+l{sXirc?RH>f=Y0V?Ue~ z;)s6y;m?Q;^WD~=S0j7m3NzG`pS#Gr??@q(_9(}zj+*BI(P#G?1H8QI6}Bdc(o-)x zafB@758@`}9SYR*tx)rPZDR9l?$I^itk*g`zuc_b056!|I=m%x~hV5?ef zl`K1cd|F!Tw~&tep!>7W(+2$e@M6s#gci#=N79;V%96x_QDAEOYBEM>dW+Deo_~_Y ztyKDEY3rT1qn-$Kw@yJfseX0$zo^^!(C}|4P1HuCU^8B28jXxtD)1qCMS+RtAq1bv z(d34m-awJj_Ky9Wh!u9)Rqen=1X_SbZ@SX+&y*3g#fp;;Z_q3eHx&!uQg8I;etSq zIx@1R@k=9cDi@zr*<6&_CJh3i>et5XjUrNg4uVOj&*|c;PRR0Kg}3G8`KKak1L>5f z^#)B$xfb8MXxYI9Yj3XadmVdcwTML*krYF}QD#s;w=Ka>-9r_IXUMsrJjL03vU4F~ zBH8GS4^xL&U+pmLYlL=hMmeMBZ0FL$77!C4=WWfkw;TAN;r0fwa%gVE3M;?2QNG32 zt@U5XTe{{YS|RQ$h~vG zbzgumLj!(Wyn0DVi^t%{?<%?*>4>>Tm<{=P z$d)gfc$H$ew;!m%f2awZd_>L7!h0+d&9(OeqC(n;{gk`!hr4Ik z-Sb=Fr6Uh7x)SqX;%`HP7nQwjDpXUpCKfTj?n#DcJ&tfJdQj4ZB}Z4xbg4Zr?cX_k zdwngp8J$Bgy5c~rHT1?bO-(i*e>shCjCgY!RY;*kgsm4;H_a7#loI969MYY8g3wsi zt@Ah_)?7~Z{ycl#q(O}Mk2*>G|Fi)VAfu0E>>>s{qgL>}?Dw1$NgI z1rc5lCUINTJ7-ax!fol5Q>t^kCknj^&O_dS1>dA@jZ+pNw4mtf@g$dIn*(1hY7f%X9!h2H!WIv?D3!zB$84+GMM}Wxs*)BIuX%tKJsg z+{TQ*@7>amohKLlp$UQ!SjK0zH0*l{qP>jH+c)}G2PN7bPnHjV>6_DbrhbM9+3i?p z@(rWTeh$Ki3+2>r8=x%ALA~M0=#y%$Q(v z8}6)?i<>Bnt9ID2qw94FQOml)28z4ot+bU zt4}2|aIXe@VG+M#Kie4ynln`Ppr&Q&Xw6zU?(rNx!JAEv&<3jr3K@#vT>N=0+7LKfSVvSz4{^SSblU~@$ zL%&&%z;7beZwAcGo@aG!UEHpyxb+svKv?n|?;^@T$MeXFVy~5ayB@m4PZp@UphlZVrzPTB({d%J@T$EUYhKBrT zZ~FoCJB_^kmhALOac8LXjS9K*&B%Ehbuub4H#X`kwoT2Zdss*NDKG+l2L(6GEe}O$ z+u1BEdrud(q^gh?8+9M|c+N86uZ8XlEi{S`5%AG_WDlciSP zc%(Z5-g;iL7ba`Bpm~ND{B7*$U;ak_@buBclZc#JzpAaMtM9Jf@{SL8yyiHGx3j4j z*z+1vzcyAg+}tx9(h@D%!3CF6hh!K(y_wi3u4igv6qYr)_cU6zz%{EK~!q3_pN-KsbC#4;q_OWW-ZR!hK z7u?sUmhvsye>OQf&S|B-R{8aZMq~O+^Iea5I(Q-9s~Qxke)DNijda>;LV#&mf)4JJ zef!qa#J+fe!yKiKOp6X=XvE5!TR9^GTAi4q|1?&$fhyqefzQDoiv}P#i@@F5ll#ku zIT}kR^;4zyq9r};_5@2NGzFBJwOZiqe`vgu)7*@LqtfBMNuiT==!-X6P6%gB#-K({t4|6HtqUN~FHS_uV=ii`84V3km z)c;oUjh+}>QNhOD4hp~bVDLYOfd>M<_WPrTz2z136LW{(BN9nnL8aI}ie{kWb#Jxu zUZ3xezNPj#=h?Y?e@%|C`%Lk}W~tU(cs-Y!3%oLX5^*!oII#V^2JRCQP-rH%arWf< z)cvi*00=Dn0ov=faqZCV&nOp(RY$YI;<(Cm8`!&!Xy^O^yW1^2N5ue2_{_qWQ%xsl zpzG;*MG4g?KWn=0+U73i8W+*ufX{?W|BRI2lr%|4wnit}WcB7CVqIGVQhe3*Sbe zJaO9MZmzdATwE)f!Vm0`pR6JbF%Q3jnnxcR`>Ijuch8;Bx&Iwin2?Vg^_r9ZRU{w7 zoRha|ueE)moK!#Gr>re~4*TWg#E`YH8)UN;b$f&i75#N4MnGEIXQ_5@BiC4Dsq@n_ zV}?!Vnxzn}%=6z~`?u8kUakn7D%lM=tnh|~wXNREd)rV2@OCDj*IKaUUEANN#V4FYYkhDKhN-!(-G$^^d%$mV!NX8%Wygjmv7OwMI2} zw{(H~C#^R-14Y$Z+i}>kMYI% zDb`m4A$v=ySx(Ma3SzAE-c#V-5MFI6B=qZlk88TLJ9*|XuP~1DK*#X6i601cCiSy) z4GRWMeo87|?Q$)Ctrp(oL7pBMsyfb1S)(0I^L|Wt0$X=Udhjk4H~sM;QG7Y3@7kLW z+Ak#6zMi}b{!){2K`J`B)wcO%OUvonC43#p(aUSd+6p_-;xVy(x?XY)?P&1r%ng#S z#Y#ON_$>TfaaX28Vr(L?L!IlimKwDyN-lreWg*-p^|b_Pb`=g`j8MOv_*tB-#`D~% z?#cCC*PTGj#AVN?`Xg=jwfX1>mDD}?xBs3t@^#iq_kCsoXJIfs(V_nO zvS@J5#KAWBO|Zx65ai&p6)$2b{)Klsdra?rA88h8PrkG0u5fNOXZZ(4j7y^jaWnZ*V_w{6_f+<6 zzm|7nfejRL2S(dZfoK@EOqnPz8^H}Wt}ecOZxbs!mQ|V)uKF9fa>3<2o_FfrrA?5> z!ADCsmK-_n9uK==&+WFq{p4w1m3Mx#{moIbktd~Am>4+ZX@~e)6mqiDTklxeqVE$n zLB4Wye}q0td8oKyrPALY;y2>y+3$<_sWijiYTxAhT&Jd~#;-J>eeWdifgW+gzmy)1 zP)f&$K2XERCZWU5w7TrTxa`x&knK)5b9FlHHMKB#?$@3(#yL6S1oThT1QrIp0!qX5z23*j~Z&>@aTbX1I&sG3vRe>qi~1+0i0WSn$azX1D+K%IPLCoJ?_fg(?Uf zgyB`?A~xPOPJOA%&1LP9oFuI}CYDs6&PUAUd23IBaw8H~Uk|DVJimXX#8G5?AiBnf zAd)RRG1|~{y-21hZk+InF|aZ6x^~k<1$y#b1$uUa$eA-n{vdjKC@NM@8PwaAqvg0&NJD&vB>iF)to?z<^yai`71xsN*5|dc zmD;MowW-c0TG~%7u*Y-l6n>z0MI?)w+-uTuV5^r!+SNeM8UKTSZDwc29Eyk|LqyKe zXp-M1q%ffGPQxx5_xlBhF(jdlZl>n`4~xZ(vt!=L4=u%nhI4yw!-9@Sr8xm`$EUR6 
zz}za?RVkk~XHk1((>I{z>pj_?1pT=?|dwBOy%WA&wOgQP;?A0*j|dg9r5t(@^+VSUZ+Ew_Bz18zMQO1`wyN4|HhWM3Y1<#G*~u?hEQ-ECBvDeZL|dSr zg{Xlz1LZ@%niCk0KpnN6V#(o2>Za^fq^G2 z3;olh&j-%_SOZRmG_7LCJZL=mRwqJs zhGY2%-i~-Dq!l52bUZ`j?EHADExk0So4o7Q95Oe_a(IHy+_m$!6eNL@%9E=$)Priq zxO4I$)jz#37U+|jG3Bbzl>v-Xoos8AQ+^dbYpphL)M$9CJ{B`=I`Hr8ptXDQR1`UY`faNRW24pPzOUCWe zs|IHqOYOFow)-Yf>M+XYfvCA0>a4nAWN*Ncc&>Tc40mOriAU&6$Z#n8hPFlk3>A}0 z&HZM=!Yu;c5r;DZ&FPij8_L7bF^v5}P#KVDSulTnXi-#@QL^T2&CGHC&io{F$0IJG zPLEcI)ffzyR+4?xI=qlS(k(G%tgY^$)f_rkn-)me{hBO&1sf?nQ>!QVG^nBm&sA{0 z?J5XyQf=-OFxHw0hiqT*AW3aPijt1*b>+MH_@;rhI-`HMy&RK8)8fW-lotxQ?Ps{o z_Y^e=2?#fPUtahNZ;f(HepZX_)W+$Sld9Gal%J;tm$Fyuoh3e)bD)mtqw#_9o}mu& z;Gn!FAxKR>p!Ff3?7FKI+2FZstMV2XZ#CB?=-?pNkh}64Dg<7SbGq&9k@=p$@u@)| z!Q}lE=S%CJSg%KcRpzDRw)J%@fdxnRJ#8Vcq_v%;Hk#w$;1lHTn0Jv$Paa}=n5gmC zQ9IpG&M}lU@F91tJWYvwO2hJ}cyf zBu?eSEBqNf-|a#2?s`iZu3@eA=0xI=nY-fvsp@9hM40NqNbkxb;A^_hz{Z=Ys{WY< z25Ey3VHxo_A6t@XUPp?ZQ=)iSOK#v<=2VDVC~EBrDPPtYQgF<#VH3DM653>Y;;l}< z^>x#gYsnwvK!(Ge08#_82`a%3Vdou&Lph3vZ+oN;HthrNbM6EX^VMu#8>LXDnmg{s zBHV!a@;yp~lg|rzh@SCw$q1VsKPwhaMvd{o%@TVFqOAXs%q75$&L)ZR=8e4&)Z(78 zbeP6cao-O4B$Y3C0b_tVhhmXMiPt{HtABT2E2c{=HD~(=2I18=Lz#59&I#u~lCG~# zI*?E)pYBoBzlpJZJE!|3?B!WC-oE8UC`K{vWk_;B43{mV@n8?F7kX)w)U@>K%S04M zwL`(X!I_X@XbarKcq^w(BJ&hm7WFRjYQdQ&_jU;J%YNwd{H~``?HxtlpyAfYkuS+2 z0UxbOm1}LP(zSdWhT}-ZMJ*m&IX;^!`OVPBKcwJdF|mG-Z8}Jb5MfBx=wGRRu}_fU z&@UY)EgFF{j9!Av2bUpvYO{48wn7Q5#}BJpEiew4k2O1vL?|C}3WDcm4lE;_a|{T3 z=reKP=y9#7#kj&r**)?bj>Yn)8TVEXVfv9Ak4o_=>3!@26B{KCK zAle)Cd)59Z@*Z{J^VADlZ`=o)Vn;>kT9A*&-G37{-tL~E+vcKJ9EaXgel&YIEKEAG z${XGA6jP>1XrnND?-Hxl`ck-$z6Jtohg+maANr0sp&E+^#vGlZkh2$1ndD$qRJ#Y7 zP#F?s>&O$bJ5c5B-QJnZ1uIEzROD6vFBO#G{}1mY$4T2U=IXkd*?|h}E;YQ^@3cHr zd%n6~80&F4C>t>$-SXsvm0EHJTdCaxH^;&W8C5Hplvx>b(b>s`#!J;Ii4Ww(b{kTX zw76P}CJkbJi->UhW`WsD#mn#Q345j`^R9gTo|EKD1s7U;m+{(>AX??&_mkCviJgcB z_vL}H#;Ug)u<@EZ$%>HSX2$z`UKLtb?1zX3lV>oLdFq!rhuhZeY};PA4@S7O>ic(V zI7G1}C2g{wj7E?m&Nev$EnG2=&kg@1COiLtJ{m+G*$Px9~kp3@^?9AYr&c+ z%9|VGWaPK+Ji2o)JDddF8n`)V0?c*si|g(THup>ryqe9{d|2NUJQu-sq(B4K!kKgO3m02 z2;;sD=Jr{#%PT}D^nl86|0Jkor>(Vad^OFhLp50Spz=f8x48$ey55NUH8yWzhTUd| zL8@-kEg`5xf%iim8NyQFL0yVRh`D$CSuPWnMdS{mG^K=wlPCT%6w#;={K-@CE=&|= z(%h5EepAX&8T$I}i505CxF~J3#e3F+?#q%@$fTBOh`%0;LnS)$TPd%c2MV&Th$y#- z=n+=-8wgW>r!NYKlll}2^%1*6C{A@F*fMVqGS}cTbn%`|n)ZV(RRj_|@aDf67GJ&U zC-?E5;Mq;Pn*!pO8{arS*Q^;ryt>-nF%XfnfJ*a*UryqjMht%9FlBnK6OdB&xX!1{ ziU!6b6Q@AaPbW;D^%^GbE)&b1VWNjMF|pyyt6sHd!dU=YAD)o$jS%_Tyo8wkk_>(zp&fmVqXwT&7%tg-+KBlPoOLkdt5@?1=h9 zb9;^aUACfYOLppto!Qxb!*Y*NA{|7}gzf!abP9yK%)Y^fv1T{HoJ%Uh#3W8pMreU4 z*2B}%K#$2KmJRyBB#pq(EtHXIVnTCdDJaZS3_-l?eTVs!aR3^fnc^A%yz^nX&%m0I zxTdNG#DUXwp-l8xVrfUEyM&GZk`s-%SUgBPAJBcr?Hj*UTL)Aiub^ z8^z|6j*)js^%)a%1~FHKL^9$tdxFbTbg$CLRL8F7NXC08=nT*^TxJ7sGV3zMW(YsK zM5hEukkaQ8jZ-k|aS<_b>vv^{bG;N3XP9r8Wz8vJlFaaO!mMAI3BbViGUH`~Nv0mu z+T?>lM!bs%50mh-*G(R56VMC}6MCT=?7rI}DbpU2G9`GKecylCGg(}%O@s_GT+j@e z#zw>su_gTvg*FWJ%9stP%h-fmggGuT=?b~TQbFi3dBi#5dF3jU# zvdXj9$8zvk3k%aI#F*$^24qw3Fasp=LVZLy>RepPIOu6vX=!|fg=u5n12{|=9_d{+ z;CV{j-6hXPL(4!*PfySAza3!IU0&2JUG(f63~a*JMeYbG>WV5T=`h|<*0Th-aOzX{ zd@)gXg3&V4%?stRA$qKPxNwUd+;zIvwAMXtRo1cwBKOWv;P1~6{A_=JNf}6i-+@3n zQj2IQgWIBiXol>z875=x-f7&GvaL5#Nh-T;V|l_+kiJB~|3(Lxp`U(ax*_m9L+S#= zqykj7ucCSVqO6r8@#i%Dca+pBTIxdMLV8|!7vXDo*Pl%X4{3mjtebWHzJ!*V|3lNu zNLT9u#_N<97EIgMFV32)WXc)f?f%eoXIoLMv1VQW+7l5oM|{k${+^37_@DZ}(NS5u zSd%!tdqiiq?~E!cgA-CSB?VSezyD?KI=I!KCvkxTcSspTWmH+0MR5`Ao92Wz@cP+bX+@B}lDX+RS%JYhm_1CLzwKe4V!&nFG$NObm2_xIZ*ih>I|S)5&Ro zeV3{6G@HaN-JeuJU4~N7YB2SA>oPi88vLYM?fJ$nLp+3Vs}V7Sht#_7F`<}@58^mw 
zED=|rGF;M}CV^}eYr>0*XK8&Ch?(E-bZM&C>x~Z?*r=MUF+S`2L-S0c^k=tnd10dA zjp%=M8C0=m89MuLBHnJ7`1GXKjotXW9up^G;+_|p`d$V$9U(3$j1=16OyhaSADU6D z8MQgRW~L;4p>~4);j>$T8B7d3B)@W!iP3X5L@h|hiirsM`aAV6XkoT0_8sEW;Lh#K z)p}YVm>8344ga>&UMl8e)lKG1>ZY1Qt)s}a(f1Ci3zk1L@n)}>;;epd*ex-u+WY%f zFAL?>t+&PhPBgg95PfnOJw^AD85ZQD z{*MgC?DUFma`|&hM2}m&PE<0ab{|s>p|XRLMu1OE|InnZ>xmxP6C-amP3GUeY2dMl z4noYlwJ~F&UW$oQhV}#8A}7<127)iLpXvzJy%;?bolgS7kL2`q=+HnRVTEhKW{|G&``5-=i@rH5W1s&5pkJ1i2OxmEt%J_gm zRoMDo87Eb9r&;QAv*rMSxa7)4gVG%)AhAq1);d}mT&nlojZv%oO*rudb`Nxvl-$bh9Ni^S+ys>!sCyt}p{Ij~%=-o5|$ z@#2I;wwcs-Vs!q`W`gtLUsD0^P@U;qkC85KU6Ab|EXbS_Zm*5E+r}o*kFyz!BW{r* z3`zoF)dU-Zlv<;f4oyi8f(_U6$_J@v%1WdtLh>69F4c2LTkE7 zl!36^;ES=Xa;s-bC|SX360hBQXJvR}L!eirFOe@v;=r6UNtpe-sfrqP>Y#FY?B$<6 z)c+f2pA4pk6~kl(5#j~%BGko^aomV=N}6h$pz_WPG}6wByJQ1#+9Z&-&PK`AE*d_A;t?m!zOiy&Y8$%D%50#`ZTv(A;#+BI;c)g^tmx7w0?DclHsX0 zI->-n5wj)W?^$sudYaZ3oFq~EkEoT!c9uzo8y;a>54~+r@X0=zYJz5Hf2)(2JT^#` z3a+K2b`s`Sp3KRZ#3Tbo?O2D{yso#+T~}svXW*LDn5@+S>0ViXP4~g!b5;%_6gv?2 zj>Kj;mED`rbZt$dv@OqQB=V~Qf}yh^*~;xsNzXH?W8N_9s`mphGfzB5v@LwQgU1b{ zC~*|h_OOjOm3)z&RG~@+q^0s$&8NO!iPqGWmERn$t$Pg0I#{1e@=TRl@GI(F_p~^K zmh#CG{tl*&&@+C-tvI01$3<*Nsn`iy{Fs%2=={_75+z9*DwGs|R0xwG>-7`kd8`SM z?WKVTrwl|2jgqye2K%!7asl&Xk4W|+{Z0wG`d5j`<ECogxF=s{C!y|h z$8tE+TV*pFZKX}ykQ>jg>b$LE41Kl}|E$NF_R8{U?*j5FS_NsLGV|ou;EP}Mt%f(k z{wvsjwdM7$ejI@+O19gPC?(2(MUu4fI@Ql@Mjud4tMk|k*fTk-{Rkj}hw&epN%g1a z`t$=o>-KJOBzfJRG3T6O>a8!EaeCf?H!`Zz+B6ozP4Qdcbs6scn{8$6zOAuy!h7+| zDp=ldaMoNbQ|$VwR@GtuxwLDpqZ;Rggn^AKQJgRmzT+#-Hq7*J6(fCgczuWm*K4IsS4J+onP}Ope94gw4=Jo{Fyz$n3e{(@re-h@)_1g&l6i?2lL;DM#6Ll{&s9XhVAT=Ehs!wur1dTcq# zlM0rq5JIHit;ZUhL`(A=a<9s)v&Vlg|s=jZG+9-)iB!B=(7ygNkS8Sr%4V)Dfw zn)e#BC-qEmdctfk?ITEXcxW|*g~7faDnvEb(5X_xWNHTToyKOu>EgP!=1QnZhAJm%a%3vf-02SWCTi{kOIgUe9X#x2%DK;odwuEB z+iAML5vUqWEja!s$E<}AL(TGkMVkGsLZqN2Gyls*IzlUZryesQS%c~lNcCuHAs{kM z72h;TDGzIA6aB5jAuy*JOSx$)G*t7c<1nKMqd^b>4kTYPQTh88sx%ZehW!sJRWwEl zBcb9>q}DMqfAh>=41fJVh%>;Mh{MDv3z(w|9PR*pNfXhj0dTa*Urnmqk~uj?o(4ji z{8q8EK{DfJ@jTkCh(wyehm>~r{Fi~-@_foy5f3{?nJ&7XO zRV5Cu;UHZuRbp=$90I!S|)7 z5Bq;7GNVPPFyPj-b}LeyBT}|6h424zwP*iBlXEnQh0~jl9i$t7BOI;$m<@oZ-|~n? zi?G|`C-3QXE;sBde;bkZnzBv`H*o)LAinF0B7grs0DVA$zv)-mKd0wU; zv2^s_-#}Io^pn-|e~dmkEyT_kE5da?e8g=b^1 zN756wIRe+@ynUAt8`iYxEkBbXMaWWP<4lpRM{7rrZ8y|89BoU&>bCyDmfg`VT_!9k zWLx$r9KwuAR231mTB_K&p?_h7{+VCM9&zkVmCXPbxy*AM$C%+ftqXC3?}U8=or>ep z;dw<>is6HkUuRJ#b6=3PArxQFl|ZQR3n52PSk@6B#+7!Uc_Y+(lR{Z!$Z9ZS{-FYg zsE(oy0BuTvm<9jyXu08S@oe zQVyYZtJN{6$mdJ%zfXnk`weCf^@B*8P8K7Q+o+lClTWEIsHSA%zgOp3W^#LcF0gY- ziYH9w=ps?W9A9t8r_SC}q-#@N9==z(p|~s9Cp~BOLiR zeNL!H9--ahPod?mc3Oz}l4KJQO;6Ijfn&7%jT%#!x?3xvXyG|4N`myxc1UWU&(;uRW)x`HS zL2hI?>DiVZm$x%El35d@=$RtvwX(6Plv9ga8?`btzDt^3_c-M9_?zVL$P>0|cM%j1 zG5godg)zBmblQnn^Q>#5{{Zx3Ya^UX6mjC7{2BeE9@$%fxxK9l0kGj%zd$@!s02p% zxGbKt7vxu^&!p#?eAuSS;^Xw^k5ScAYI+Ioe6qj`bpC*AY%-tLds`vofk1aLslLx! 
zoEkr{(EkAJ4x~DUOwq8DpH`$NH=P`-9%Xh7{>xsp@%=!DE5^srd^$4M>aVCeM)&ZX@EHAkR_WO&FcR<`9EX;cVzmc2D8~dJ z<5D8Fb8FFFw;!V?p9EDkwwvtr6NrAT#2E{RQNeWxWPiv?Pk{KdWD^_oShAnK;qwZZsQF_~F8;%$ zX3iNh3yL>I9pKk8YzouEZl`Zd9suz2L)|MvWBP+y z!yz3!yRpM1;e7I?tYHPw#M~Ho+?&{oPY(#{`cIEm$NEZ_h^4DG7qqcA+3MHGoCz~G zqIwR%!r<1<_Pa}|&BOA{%$wB(n-pUd@JnPN1qtFr=JTc$^6nn|V~bWp?$x@=4a9E# zTzu&N00i)f*)&dx`^n}m6Gl#u@`8`HH>19z@|6*%h)Hwl9klY~*111UD^kGCB`r7P zf4Qr~VAzm=hjU#WzBciQp%XzbXk9|*P)E@p4fGO5&3G<}(iG?9)`mqOd6igG=>w2B zuBD=A8G{%0$W3vb+IX7S_8w*JZv4~f&Hl5*y(yuGO6NX3oJbS+kWwR>36`1c+k@zx zrPFD^w#Xl=bKj~>p48D8ouMsqYqrEhT>5NIldN2vLqYCjfobE()Ebw^N&Y!f#@U-RE`{uk(mk4XMI@&ux7i<)vjTSway!e*P({vOHUN@i z=m1GgbuDkPY5xEutrp%BKyzbZYI8wcS#K=iYBfQlPpY&Ca_woE^CPQs9S)_F1o?Vy zY^->sPAjOgwA;s5ER0#0pq27q+2zP;8suEk8b!I($IYAbG_t)0fKTdl2$Unb+tk+Gn-TDmN~?Om@_y z@grQF!tkR2sQHZgb>N*oqD+S2t}W9>=R7wCykw-Z4V3A6aZ9btL_63MlZGXu7VH`t zj;LmNV2`5{M|2;V_Tn#W)>Ix(OmK?P{wCQ}6UD4->K#zIH`wk13NecweIa>?@G@}U z<3W84sX9E9np(QO4N%ny8!voA%wF@tYrA@_kg=GiGL`h%)Dkov$RCZgYek9>4=|vEiSRpK)JwWPUU(99CoXZm<0+Ecm7T83R?HuE+Gjonc z#?&-UDePoHf? z6}nXUSM{FQ^upY&aMVfwpeSrg%q;lL2inPGp8QaeN;6XM)8j?$FLI5}CDz1VV^V%6 zuMq76RF1Rh3qpQ`UPjsT@U`svw2A)!%kvk$R=V*uhd82K6OYnw5xeok+i2mz)S78H zST4nCWcpH7uv?uYq;p(c$BsD*Nh_9pjeboAjJ{@BbkMbff~zn%Nu`;#o#J8I;F(KI z;#ZJQCWAE-HS4D&tE9$?j}gjf#r<1*TH@SO;-uHUCqt>ra^7EfLvg1qHZYIvFgbM@ zVc?yA8gc3VHu~HYIoe$=O_}FQ9B#@MFh*C&^I#T=K;FgF1*ZTC!q!2EyoH@KIIHBI z9_x37q6pFrY+^%dxqEH1M)nyGa%i(5!ZYa`e;h0^*txzwC3QYCY3H3-$^`y2#*zww=?s^_M(p|RmHfsI3e zO($|=YQ3!#4>SR_ou?9=;_*emqnHM*-v0m#MtOVF&S}KC4-tzx?$)3!M9dVA$=-v) zyW9BRe74yb?k#FP?KrmhnEe$frn$DFWjgHj2~}yl0pt~am&DhqEp)O5J5qCFw#M0s zM}h^Ixh_N6<4uS2&t_5X7FWRSIxblpEsZ%yoyu|gdUH!#*{RNyQ}&3O^j=yUo8A3T zoh2OHWRJblPS(lOidOW7EoMUUg|BOqNs;kt9v$-zq4BI6MZg28VRl&;Owz*^K)0gSvJ$}@fMPRq-1k2zM0$Lig$W~p zhS=u0%wu}u0R2a6<$cKa2MBW#xEK0#O#cAMZEDRAQ`^8QCtDoyojMYZjhbmPFMOPr zcR4%Ak*!7i;sR+4+DaipY4Rspq7hB(hgta;6Q-p~)%t?bKMh2{;y7~@yvzQ!LX(Iz zpP9ti^TiJV?k;Vj(-^Q^4X-;N7Odn7yAuo2&Hgsc{98fq)3BEpCp)@xjCxPiV zKAk%=ora4wXLQTDh~u*85|tR&;ETOy*!0E4Dos8vRdkP zmv(2ta>|!CNCY?(n!@DyI%Tq_yr#6s<|lBq<1II=Bes#PeA$qslc?DH809a3>|EDz z4-2A6n(foNpCjIFG?FxO8q}any6uAOX+O+kNkH}#=PLw zO$rfaDGL6?amG42py5unG`$$7zN;H+SWD)K)}S0}k8@5=m6(#yzuQtQgNpd+^V5-O zgn-UVjDexAqMU7iwRX43mlH0aVXP!GNheNsaGpAcjH5C=jc!V)ctP1B?1(f8<8a)J zlH}(+x#1Mi+Ts%`oSo>=X!?rnJifvE_5yDkhNx`(rMlcqct=SO=oki^#1_l&mBC9RILkg1DtVDMClW>#e#x{9G=Z1 zTJkLjF?oqOoi@4Ur&4Zsbh7FczsJ5fJuUKlg{1!gC?0uMsZC%NAHDI38!REJTCwLd zM7W4fFB372{^baD4w1vd8IqaM9m9fQwks=Ii(bNkpo)&So)eVPzPGnM~k9v5$${Gbd8a) zX@9e*u+by&2hz_c$XZ|W$nI-ZD^vJP<=^Y6aROD~{{X)#yB-5na5tt40rH@4a95t8 z?d;TmWP`&v41fOFWf%u!hl=KdMDUR8DSJREQH^P9 z^#g}_flz>uu@DK3zSuC<-a}*kzPMxh%NfeDf{H-ur(+{mPzOJZu>3;@hCq(fp zH*haO08t8q8tE*^Z%gTCljJQo`7RViD*;~<_$ohqj;7D~9ee!Pri@=GZf|!m#7SrY zJF+b==AZF7wzQ+Ur57HdsU(}RLj0TIJn#omvqDL&zO5f|m*2_DGb6TU}Y2Wqbmpx9a#*gv3K{C=!gu5Mv^E#Vk zx$pWOn9=eUm;9+(N^7_UTAPrVaeiV$FD@+M9j!yenzr{v7G$228!e4j;L(nCPZ$ui zuMAvekNC9?NT4J5I&?YF%gA zBqXPDi!w9+0Ewl(raa;hq~cp7Rv$6RX}=s^<86F1+)z){XP-pcikefDO8)@!dSgWo zI#2SCF$6JR6MIjPpe4_Da(Z>;{=XxIr57%xmj3_+{Hp+JpJkKulcdb31}}3@NG&g& zO46QXcXpkf*UwX#S({ujE?RCh(iG(1^?rygTYZ3y^$P_(~L%9W4BMK4Y4Hr=FlF}h2959-4-fIpAx?$K|Z zxX&Mx-pH70y{{T!b>Md&qHPb}W zwL+RLNN@Q42<_s)L~!HWz_H?-R87V`R9w4*4i9@}S#gq@LDT7wU}hR`D1VQQiQUgS%hS9&gR^wjzKC-j5j{W_NzTZ>sdKoOf|&Lvw77nx zG4-aBOpNkc(=a{q&(lU+i-Bu1P!JiHRxrY%{L;6HIg&rC7M>Fvak8a=Q*|w{TixYV z1LNjl{{VwX!XX`~8&V$A<(?0a)|B2GAhkSHqJq_1OP@f>*1AVy)9OWt=(w---VgFM zpPhsRe6(|;WgNNd@*Ghq{vYId)AH%KF6~FP&I+v$KgXkJd1?BV8lCPgpN=5$wP_#&=@gd}{Z-KYTkhQkL*0?oGFHA`@<4x7MO{fzl 
z&)Dt@WO|AAKFwNWWy#Df`!JkQ9XQKLP`I@KWPBQU*rk^zUevY=LS8hcu?Ns;Jk*qO=eSkK-CrP#-aiQXVwpIfsIaEeL{hgG*L6i)jE8zY*;} z>%O1-jbnRv4MYoK2WJH+=9Zh6OYe234&qizXoMjF!$BW%t$-)+f8Gn(NI`XQZ;xwZq(#WgY9*lqv2>gyUTfA+1PS23}k1JRI)FY#+x4)t`oQtEi3Vz&9<0} zOwA#G9!QbAzH*#wKgr>y(N>+#oW|gUmb|0Db8&Z5w2oQ9dx3d7v)$-{q4PE--T|tO zNGMi!C%I#?V%0tYoc-7qE1WRBdXvyC(HZ8oLhGr7~(QM?y>U&K0p$trssNk_Cg z!f&@meub+lPZASj&2|Qo`FmJAZA!1v^F4G=mijdR0G>h%IM^*e;bo>$By3cP%S+}_ zf)=Q*YE_v*o5(ac>u zYfwUQIXOPDEpoB54q9msS&z3Mb8c2mJTvK%z`=I=?cYkm`F#yIxE>6xS!6K_z~Y)| zq4ss$N4y=b)ax=Il*t%~-ryAz7e%(DJh(dx=G=)52&SO)udU zc?B!26ah`)Xa2KcTMu?IMm8CtcFu0(AE?A zaiZ=UWQyYs+EBc)(!ta&j+o&@zp^$0oOx@hUnP8PrDI<-lppAcpZSH}E&|sp2Q^t% z3LfoFEn}%;l6yeYJX$_-Yqa1du{En26+zAxUYt{G9temM+1TjYrf@HdF1x@YP>(41 zHIr*5bEj0OAkq+iD>_Y=88OdLvR-y4(etR~PBzQlq3)cHqtPkb zM@jzxiPS*}loV=4ghGrar*N*uBFG^d+W22HLuJgxQq#%PlTpev6PnQ0xy9c$70Z-~B0V}U&pcf}Mw&jPjcBV^dw5ua(Tz|{a@51rFN*-iI64uK z)W#hoYO`}&g2}5jXj0S!NxM zN9E1{IIaoknyj<8_;8adIS9VywA6ywiz$Il_~8O*V?F0yU;!>%BbWO@-7CLPn6xmX^Adm_ooT zQDZI9hAc=C3XG5udZO*Ac|m;7_=so@5wtXL?p?ch((R&Z5;pTe$-pj|RPeDVv?q4y zi0*Q7J4OZSIdvv6auI(r9@M~Bm1AxR{@hA!be^b1c- z$2TZ^w1+wT7jm9B`M^0JekNDN2w;4e`X{R*j?gf+7Pirj#j!Qru6be(1+HOw_BE(; z6VY)|K9h$qNL=@JtU|yPdr=_{4s@MGV2e{)`g2@1ZHH-?o(q%M#*+w7`fam7F{@2| zN^1>RqBgGKN`NE5^Tzkh@2DxggW8+q`Pv4DJ?;b`pcM;!t+Kjn97$~+K**vNr-r-- zz3I)x9izOY?q4hxFi-6gG@*ALqhOLyauNo!f~}xr3gJ*xoXa>;EiDddP6!eUWNkh{ z)L?UtCCkD%hQZm(DD0L1*y6MpBNI!Ci<7h?DVzx|7qo53?zV?W_0}#fkP+2U zzR~SXyaS#MtlNvgpmRay+&Yo6XTakim6p}PcY;YPm5yk{@&=Hm#v)BQHKe#0BP%YM zWa{>GLB>YnWbI89psuGmWJt6fXTmttVYw<&bsVsA{BHD2ZeH-7{+mXlB=2_3uq>29 zqEtr!B_#DbL&!c=ZCo-jl&vO7YB1tT@X{B|bfxzOgwSgWM0UvD<_=moq&Pr=O)M#5 zXtd(ir#le$7proySHrr9+95=KmqQk{gfXpXYvc_wyEIdsPkw6}Acmio)lENp-E%`i zU9_#a4j`W|YK+r>0bWS5J0%d(MTY|51eCwgj;F{NWs+NQK$4p3K$7;ca0TULUaH*Q6Gb)d4HG}VCZp}hAfaI| zkavU+9M_c~rPBd5W^XlYBABpgMbrza65@$1X_C+)Lruzx=~{eb7X-)j-yq4aX%W$I z$Cw*}cWWYyThsy0a359j1`e~DV3o_mw2d7{HnxG32wy%?Wi1@E`LyCefe2HDSReo# zGELw(0k8t&(mlz#FovEWvJ3`pUoI)cmH{YDVF*Z?Ngx#KD7nBo?aa-cCSX3129x0r zT{6jGozEs<*KqM9C8{>!=UG6ET+3apB)O}A{TM08w9w#rUnm>g`>{1W;zrb?3Dl`l z#YY;$CEO7Ns&b+QS!`gq@T7B`Ol`z6*)tn%^WbUYxg-@W4eytpI0}L%6y8QF0udbG zQ>-IkW!roc&nuE`%>)vh+C@b!3TF06!uKeLW8HE^#579N_>b7ry*j|tv5a)@IQJ4j@a z?RaZ$kIJ?2bm~&4HB!YiDzq9CTAIY()sR`D`dQk^%F1t#^>et9vP_)Ph;n>x#<|ck z-)K8-Ys$=7djhL-Kt#|MSIM*xYQeYC#9<(C5+e{!Z6ie*PXxy1hA#=O&L_O5cBVA+ zTE@o{57KO$27t)0Y%@O7Uq_{qk4o03JT;TcXXI_d90r29Oo`83CPvC@Q{;0~By5c( zRW1WWZU@@jGhH%Ea%t)uvCdYx?O=9DNls|C^qZNCpy$&~xXY5ysxz?ZIzi2fcA26L z>e{dw3%gAf^E)w3l(jY1v0SWab7Il%UIfx~z{%|axb)gVZK04}5l-SJl+(nHwCY%@ zRa-C;uyqd|p=7dXG#$52+SaB)JQd{( ztSoXP++B*s!33;$#)pQ7lw3hH5u`61PBLU1ZrJQ!HMEx~CG2yF1;?gez#@nODX17R z$Pk?ODJ4$i(i?avHAfg|qOqjiJ6Z|FP;;%+To^$PLhn8vFlm#o8IMrVI*n;jiI$+Y4gOGGke6v3oRnM`hD76Xnb z9(|6>x-RKTzykSN)E{R%Maa9AN@GJH9OUeF$7(^TjY8V8X-ChNv@xQEWk@y7A+!S; z#AX1^se7b&l$(?$J>4f|T{9xVp;$$s&8DsbAUIwr&283U(Y49OHI<>l2G?>8#Q|Wk zH??cQS;$M?xZqC#8|q>uBC>{XX0Z@DKSlv#jdc|M(oM~3Jd*y zBVl`^fJ7u~%BMK>*_Tu!HzY(>WIVC2AiPONn_P=P^IH>f624df-c`^{`M@&sC+Sfd_ zt%Z)fTxR+sQd=MPGj}oyWKIs@(gvQkDTQ#-;P}|>I}vt60>?zkk&G-C--rR$*2vu0 z<7nzIOw_UQ$>zyL+-51|%!^3&wXzB|lJ8Veww|{Qb&I#e+#}elzd_nk>uDjS9Fbh? 
zJhz1#tW~bYt07+sCfX#*pCc3J%0617j$E9^uj|k-PKzGYSaT)Wy*6Q0YYuvDvT+nz zh*;#hb}%5n9K5S>M74YqLz{K3r5fP01@PJ`79~lYG4s4Ee&X4LhBSp<9v5gH45cdNHrwu&QI~fRT zg*1>#iIxYwt~g}&4EZE6xrVN!Va35_)|WW+?agmdJ^8%Iaw9hI<&kpdq`)stVqU9* zw9<)0u-n?xfeO>QY8Eli-jy#4;BpSqAFGlsvD-yb9vFb&3LF)~2T`D&1aanUHAwth z)^cxM=tmx%eN8}anUriGxvWFfx?oykEIgM)?p<`(8!c+YWljY0dSM2Fdu>T-kqtx$ z$NB++E*3G!FuCQ-$MUZNKvVW>u~Kp*7EaeiriFI+Qvlzm>9JX^rmzjdO^04w24ljVE3#CYi!K4r{!pwDgRoW1~M-U(1SPsjUmN4LdjG3fi`< z%ZWsQ4I=HRSHoC(UVyD++h zukD7rCcI4mk*sT%EhmMN#8Ch+y&r3-6Her`KFJFb6||QIlm!kij+kYi3EU?PjpW%l zIVP1E$~W?z!mHY-9kS|0Se&tMgfP>O?fp!|Y-UrfPOvfXMqM15NZiAhNx^#wLGEIX z+W!C}F5H1>Zr%t@c>+(O?x%Q^$XxUUNyIMT15QMcy1X)5R>w^(6am?S!>OQ1ED+|I z4GG*Bdmp4DImJqErt(OqTWpq$TWZyXcj)lGU&BElyMpo&HoWZ7!#P-jv87xVB8Rc4 z3|IR~WW}s;Tmbo6&~+9JuWD#()=g<8h0lqyj+~7|xjsCY;YS-D4rQ(2s%jv#Wr`6r zu9I=Z1`Mkn?||;twXY$;H$1Z{@+PcPTGuqEaNm_32R!2GJdO~hkt-XBg&VPSgP8eq zFQ+JOF}aWc5!O0IENs%oTzW!!W|t>6b}_E-NXAC@Dp^?ysXN+EO3EB2xG~wcq}O85 zS{?$oBUx~WYQmkg zY8=*Sn8m0%Sl>I~l`yW1R@)YoAk_roJ~GQD_<&47xZ^;;*!xEwZ)D! zP`0Nylsu-MPdj*D5J;r@Wv(c{yM*D+z#=Ss@8^GwuVYvjTI8-Qp3&vqjXbs7h39yJ z?LvnMAXcJPd#(2Z{#M-$=TNfv#*|0OvKWbfKir9&q&t zE+IQy)g!u$hx&OkD6P@XL~+fNn$;$fH!IXayBv(piO!BUI-0yRa?==bArJ!wS5brB zypO8odASIb8Z*rVm1>5VUi@$v^MAz{?K2E^x}16$~&<9C}^dt`3kPH`#EAqNy6 z%VfpPAg-+}%IzyBRD4S$mN5}BC|Jfh?{RlYuWMLWYXthN5BEFco;jH233Uu@Cd62x z)aDxSgzR&Ha(*vFJ5JXI>Kh9SBgYas0NVy+(*kX-+Bqi*Ro!&HTiUydvaJqQ0^4E7 zwLi&6m1vB#zX`1`acOF4{{YV~1o=Ayd;AUBzELXuKP@zQDo$I!jjx6NoCHGfORXN~ zje=rxx#150SZZ|H6^_9}fpwpOR!I@70nV-|4D4^b)ZEL!TnuwP=h5cwlMv5jDuJWBT zqEC;i){K)p(}92E@&_)XUI&*oy{cQ^sNZ61<~q4>ZFa9}syK0p#Gre_xTJjF z6aqBA*6kEZJI35}L{zIn2ot*A-(67+U6KcY>>H6^G#4N^r=Idr4Xk^XPB3g}HbKYx`WI?vGC+(f&Q_`Vz0{N)gjY5YkHI%?Jo-V%o;kfK|0D zDIU~9ZL3Es$<19wl5MpEu~$mdA8P3p<)IR2^1LT_E>?*b!fEdCi-;xV94O*}1!t4> z6EXoFKiPC(h@Qz4bJpr!8rb>~N~6=q8zicc<0*BW&5!9dp@q?ax{hk<2QU%Pbkx%( zo#E4T3LMEU>5^_DwmDvVz5+; zEIJBJW+YlA2O25koY8$Arv=KzzsY_qC$`b|61t1zCby6fj)TT1h4L8EAp@(f?$j`4 zKDusM+o5hcXr?U)Pb<)4lnBvWL7R;2f1|1lx^rk zE2uN-%Pcg2q75g0KSu}z{jDFd91oQ)EmhQ!N{XXwu`;2a`J<%d1&sA=qD8FTogiTU@z)*Vi@yQ<|jNv?ZQ!R#3^b+cqHN0DNv);1{Z3L=|BdFDQYXw=Au5wm`1t zsHbdk%PDi38!V23NFM1WD%FH4h~)2dZ-)foxCkPS#YGvF&24PefGYs7YOREV$DpE| zx`j2?F$e(DXghLgWWHSMcTAuW;RgbVPc(z=ImXMawz8(!A+My=QAfEr*44{Fau~6+ z(s>4~4-%|G@(N`cB)HIL4*fNUoEy3Je@FIk?OalZhPd6F_F2H@YhD!U42WH zqr8F%?yCG?(x2cixfwASht`m4brlQ461OOf3zw9|AwRts*ZN+^|q+v&A zY8>{`@vu&+BaN_dKmc39bva_1-h3hG0?ZKf*nBV`n%4zNg2e&G0`?}BVP4do9ju(x z2`iKewCX52mP>@^5N&3%wC7tv0Nz99M^TyFGD=6tKz8_1nh#EOBak;pA&EnQR9srQ zYUHm8yeQh|&nvNxcGAnIr>>*lBV$aN@~+m21lp(n09yolc?XuY&U}XwPlOL^g+IzK zHF=9|g4zy52P3-^4JtKsjVBRyg{LFSO1oUHo=Y5BTDc{@-1#o0lW2|{xJ}xZ&&35> z+PYT0Jc=laBf;^ujsya+ZkCQ?2Hqm~wPgodE)7CZ^(ShMOp|+FjV}ly5ekiyN<_(c z$fp||wGuBrT_qwx5xUn7Qnr`I*GAL4vD|r5@Q1Xjqv2mUoAON?|Wna5sr-7A?FJTrlAf zYJ%{uC3Lc(h$|2q@k1lY_NR@!Z^CFrq$h9XV(q}C$VZe@_z8?MIjte9;@lHsX*7wY zCaV<9NqXyY=^c?Qi84U&*03phtx@(H$u-EMCMzJEQ9PMl8{0LbAiAf$#3s9mQFqA=ad#p4j_eienuP8`)OG+ic_QqO?khYIKy z(s6T>Vu+efQ#e0A>oy%VQh1v(5oH%N7?M02edB7^|03ayk+#Oq801AZtNhm<$W@kCa zMtSOWwNpvMB0V{~oB)gxWOL3ay3kgC`^pO6xSy+xaI}W_L6dGHjB8~ zr-8!BG^g-T^0Gwm)ZyRW)outlm3#~k4ZJ}_Be#??38bx85O_~H2>W!*r4v81sJ{1N z1h23TI{}fTc~wj2$pvS@^J0f8k=TvxGEUr$^Nq1&5%Lc!m2`z5LOWjA_D1xrEVZhX|&>7%brW5#qGOS zwX5dPcRfciz1>z_qJcrwk8-n#6p9W`UPq?sIM`n*&?qMhO#>R{(d7;hS#9n^&kKyc zUUG=?tz~e;3*2LCV`Q1lGl6j=po(Bc9(F6{xuaQdT+*wIiUoN^RqqWGOaj*hPy+n` zazICsiKUj*oCq8)6Nd`ZO`zITuW23h^RXx7*cw(`2M5A}JkwgS=FoDwPJJg%qm&R; zV&ML9W6=~vr>{Gm?FUkKJ4VpDVPc4-*yikf4@Z8|H6yQLeBTaZ9B(0ftsUgS-n6U8IK;)jsXB1oje%U1bb+OIoZc#f&(yVNZwMY**GT3!5o@Rd{=;lc7f>g^ 
zi6LO?|lMS+V_XD$VMU({vl{L}_H>lPk#?A<%_CxqC3pIS7KA#((Q&3mU+OUY@ zg>=4tDF794s?cu-xsUVz0P%c)>3p9J`1og%XgE^IxVx0^VsE()1@V=vu?;ATN7D(X za;&RdDqJ8f4=A0j42n-q+|mT`SzDdIA@J3PDWtLBuk6 zV`xfWGGqWc#wHNuG&lyZwSrLA$VqZFwW#7Q4upjkpD;m~QAyKeI;qa!pF06 zXO*ZU`s%4f5UiZ!cnwc>*S-2#!Y||F=Op83pcMo~e$DzVU7)E0TY!evg>8Gh*l7YQ z03Sf~d?LOV#@`;H>G|IO088iF56r)b7~3wHz8WYWB-sTM_JK`2-o&d%!nA*Ow)pgM zx%D3nXbAcAd~Q7>#@`;YdEV9jh5A}DGdm{)_Z$;H2AVcY1kspWPu%i?2$g%2!U@42 z*R^W)zq~ggSpfde9-og*{r6(n(|L29+M0fjo*6>czS@4^MFU=+DDkzU`#yfn{hk%t zdB4B$_b>J@W;+KLw1uYCwT+it+H;GK>4fdi{L;P<{ZaaV9+UPwf%k{oKeh4t9yTAa zJvaKN)BnT(EfD|$0s#a81OfsB00IL5000010uc}cAR#dlFfu?v1R_vT6GCAWU~qAP z|Jncu0RsU6KLK8?h(i7q{Ddv{@@%rp`G`XK7rx8ce<>}u@bA>T*?yDt5QVX_%Pg|X z#r!4dU!?t8JdYyF9uvba;-P;BW#YaymLY!$V&2O5R(T#rk!Ad=EV9clDdx{I{zZwG z^L%#s9%G(w;~{3tFXK^Le0@qEsa81F)%+!9irEprRS1-8V)^!Y9!2Jg#p1B|^^p>+ zlxz4p+h>vFEXK)k>mg#SYK4m|zfSXC5swj#B99UhibUPNS!`pDZx&g8rTUk$%P%oj zNjHf}zl|k-KFcp{X3O~R_^PpKxA8CdYcFDz9&-Nx(ktNC@yq`JfEV*swJ%S^YPQ?- ze1Ef#QG7ZmYQ1zzzg0AcUm`S1tSFCS#FDR7MZ7MFck3vWG$rfNNTRbI$su+=Hz@E) z{d$s(p>TNF^h>c0HR*UVv0LiO!rX_1J_;y9;R~`}+)5+aKXw#v((MZTsB3{f2^$iz9wk z$I$Hwe`Egun2dk1_mou(s(7}KGw(9OB^Z<|L*%R>v;P1kIaF5LJjpwFQeHM$*u^%w zIb|}M6_D?oOWb(#M@xEtt|)S88d9O?w z#5_f$y$L}}u}(;D$={-hAw)=_ZEN-sJ@M?2jZbO#>toFevGhl=k>V#O`sEs;&HM&d z5quGNqN{BC6CR?v^gNlTJ2h>@)0d1-+DCyH43ZKkd=d7GvPb%qPueVf5_U&5n=jDt zV~bg6NTDo1kfeERY_nHa$Ab4W$%;aBd3hlr*-UnYFCt43CTY7x5-p3CYG+l_B1hJY zY)iywYn)!oBA86QcQl*-|Nn2+dQl_xo{bnq?OBo7BO$RztEg3bwe}{8+G-TBV^z@F z+88xU5UW+Cs48l!*`N10pL2fSbFM$HbDeXI^UN)e$K!ds|Ga+X%nAwY&OhPKEFoV6 zo(CD|cYv2Bn;j|U!t#^HGKaJHKW>7}h2%ywf+G25R%r+jMN6H93)PPLcTmdKQQh21 zIxoYNpLZRXB?k`ZhT3uJLpQH3*RlW20@~FE+O=~l_xG>=D6b8$Ltp3`e=j z;kRp0AtgF*wU;5C&NFf}vMVBGH&xlF!zb4-_%(zij~e2D zEkg*iPQD#nExd+I}GfnF~W1 zw^;6=>E4(l`~y;W)c5~#G!Fs<4+`W0)l(@3ISx0Qb&{GreeLCSlzVcmoL&IyC%&dq zj19f-cw&|d|1PU(d2xGZ7R%VgxJmwDLmLN*tfx;E{}Fk*Uz#YfEHszx$?@()C}{V! 
z2PmPy&->6yS84k@>xnizvMM2S7Xz27^*=dSenJwoTFky$Z8+!p_%=TZx01#-D*H`lR>Ql(?@ zP~KFIppgLChlI0cf%gj8I@#t!V}1b^sDV6zdsS_;Mxu~1A9(qFW=K{Br5>H6U5fU- zziX&^bG06p3dL4Yt8=B84l2+2EnS`54VQv`hI^MRW8$XH*rnU$btrqw%GuG5Hualo z%|Z6?&HdBJACUc+3sC^<5T!f_c+UKEUENnZX5B_^MbX4L^&gRr5K|ZmAzGYu{lxju zUiph|WwzT`v_a;9W!)bq+4*mxsnqfzJ80DC7{tixxdFfxV-v_dY}MSXs!f|L1MGGm z9PtTU{Ad{h^+bylu4k$a3JjLq5e8V4Lm$_hhYGcGtd~Lb-;FhUwu}_`_Co4!hmrpg zRc8T)TPKHSehmMZZe|F}VrS9T`Nm&umnt1mW9ZrZq)Y?)bgLv}#{5qp)#IF0;PVGJ zT-e`eh3mUFB{y8Wh>^j;o$`VDx#2$G-DcYGlFor&LAkRbbj4qz4 z_G4{Zj5R{r9LP7|>pFQ*b5Vh-b}-7y?G4zan(=E3*b={!|LVG&Y3WoT#3pMoBBANI zw?;aTXNJzz!ag2f-*oyV^hv7ZNXk-V&Hg!;&zZ@ZNv^n$DG;Cj(svOz_|giUQOnD^ zRmKsXrJkzb`b2iau=%RC^Y-y(f^t~}#hBY&@I)AGTPa->C!s0-)LB-L$6)V$4wYf3>r)7*q!>R&J*vMJe%81^G8;?h4oP1>?Kv~m{7-Y z?x4Dw*k99S@Q;iP)ta#4&19cLf{T6T1jBEThVTVLP~6t@ZV?l%beWT~&eQwZU93*; z)h6dIa5r>%=i2sKljQrAkQn&CS^Y9iw9zOsJ zKKeiWAo(AD^iTZNkXKdg6N+c@Vq)>c9%;|G{)mr52{MS+H$6G*Gn^Bz(7!YH44^Lt zfOX$`u_geD*}_haunc>H)0I@ft$XbMIXHEP-sT-Jqi)ape-3O9&9N)G2l2lXwP)N~ z(WpUYl-P7%5*j6+ZGrB&d&0U?A!N~pPVMy=e36G4JB)}M7~Ah+JZPc@E_ur54?EC$?*93H!1WbKsvM+B#O zj<>(q-)Zt-d(9eI%P`zWBaXc;G%Ej&9g&4DovAj>v_x*#>wSBAXOVLH8lb~`VFTBF zzVu8!`qfSA4fMs@-#zvo%&?3|#r^!^eEH|t*|Av#2ZquB;Ff)4I$!L6 z-=7`K`F?)u?;=}92^_;Mu{&6`(8AL`5ZfDPyi+mjMrYECf&u5x+~PX*4pozf{3?nH z5iy^u&7q4SPS1HEp_~ddv}O%zaKq<{*tF2x%o5Gx({C~I-IC_H#(#0cD5D#kI2zwkL)ks zLr<4XPlE_FE*B@2xkKvj1)?WXl<(eiwbJhLx zd@AGVL?$~pImF56v-nId|Y22ErJFJ-t3jVd3nrRqJqzlY@xnLKbg zyUq-9?WOF?YJLVSPx!>aVnG>KzY~2sXvVN*c`hNH@OX z&GJdfvZbePAtTRgTiU`e8HYOMO>BP+(C%+!FZe;SN2>Qec16p0C8?}?#g72zSiu9s z*rA@64c#oN`nL3uFIW_48KMyeNzL0WMu&QbuG`k-&8xKd%DMXmLrrTFP&esY4;hwZ z;F9eEXjc%EBe2V=n}V&QVPXQLZu#}Ol;*L8WjQ?N9eEnO41uNwTYI`BU8)!Ri5PG* z$@XzMtfbcj-qH5(n$yKQ?nJ#Exnkr>8eKQZga{{<9j#)1u?%*CXR^Xj8l*-c@4<5| zFYZ6k{t={pH$$CF*Yl{l5aa;q4}wS~*MvC6{bH%HA-!a!3CPIzaqJZ_>7imu2J|_39g$mXY%F%$-E-_mt~Zke7c&gzg=r ze44#te9AV1B>z4XuHelJKS;*}*T6%juaYQBAYCHEkizdxY#A9;E-hN0>(-4YF?oPz zufChBzfoP(7>=&vOmjs!bBd$%*0ywJuUz8i6Y6S;TeS50yN&J%`pM;tw784byAI_P zcp8>KE`=rpk%cKkGj0e0aqT~M%%6@Yji$OrzRJyt)=JjSs*rO`I^X%in0M?mdY}7L zcB=9^C3ai=j-bAR@Q)M53T+2p_I(4>R~6nOySX;3s(0fdPBkf5?ecXe_ItPMD$vql z2TXsV%xhVeJayny8NMOnrQlK%Bs&;Et^Ez+p?WQ|c@#tYhI^+v!<0*g4SnZVD=QN_64P(dbL14|Iv_DPs zFC~O5n=wV@9}UZB#1!%!#h0EjR=G2OGhXMA3>NRYq{(&$5DE zSC;caj_{fhfZHd3fi(J@!%yEduEoRl74faQmgw#$Nhwstv~b(ICTT|H@dDh=8MlwO zwZ4p3sYGk%AJHmvo5jmzeyn2QAP2l67>{3x2MpCOJp(o=p2g`fgr#To@s`k>h>yFx zRCmaIuRSd-Boo&h^1TYhF}S+cVpadpGEH!y$M;xfV(=mmn%w-D{~+b>r{%*Szh2s` z9rw5^vCou{;k+}N?oZR*p4jV_brjqAIK$0AQ?r);cWaOJntwzt8w}ek+MA;s^^)PY za|9$CCF(Sv#}rsEWJ1t7Z0L4C2(K#t$|>*w1<>J?uTZpHg_{5Gr+*0!=7lH={@`U9 zyS7SptY0z%ZYC?I3r~Nai9dvEuWT8h01RzZ%#gC*1|LI|ZsHwLxcw*n`!4+!46SKb zuJtndalR-qT>L86!eHW$TPnd@m5B>)ex}emEb;=!*yf?e?&Az_V}nmhgbtujW3E_J znSVcTy1%pR#NsWtHec&FCeU3|?wa9HsR05)P>1%Wf0(zd(CWG;XO%`_?l}N=%cGIgCa?D1Xgx2k0?sM7iFj8 zB>sBYnWMb%KCevhdbk`2PhX6#%xcxP?A1!_%>KMJ>?bDUJx2J`tR0l}Hk$JV7~|hZ z_NqLP)oawo`!wy>=TXR&9xkM9y&5-T?Lb7(2W1nz|Blw?5&>|i{JDbs=l;D~3SnBK za*RWSJ}k$M?v7q+|05!VXGQL`cYzMYaDvWSbPLxTgD&?Tyj#??JprNDMh(e3R`!zu zSdL6TX)e^FyejN857K*&Ox-Q0;04OT?gaR^M4tPj%qqMVe3YZ|JZsjEn%bDZN-|n5 z6N%mP+>@aDM--5kZ_G=jXqp%}OSp?`H7Bj&$E`XCa-Uo-!`0VRUMUuJ6ScAJ3`gXc zh9C2Y>>%EraE1@ry0)ks*ft%wP5x1}-`xIN@<10%lgYEERVAfmb){{ zp<8p!Ycv$pz7=2 z4QL&{ESC7ILz8rb6idpQ--FQ9;SP?`>_w4uv8x(>V~exfk<0sMKH-4Y7`1t(sO`!M zp5jTDFNKHyh+K7?9cJ6V!FI((h_g8^|D0{V;YL#VE&+m4(~{^RoS zgbfUt*Mc!)(&J(!r-?1kCJ1CZMY62IylquDq-4e4_`Bbw(H@#avd|oBLaxl8zv;wp z9AcH`il0j|2(Cv7%+tiR9}3^ajGZT5+MHZRlBogqcg3u}o~dYPR9|wuxo2gJF7Yk< zGx=@RkK@^Lq+2O__}Sg2A9O(l0>K(SQI9<%FzCK$jpESgh7QftnQsU4d26R8s&BnZ 
z<`3w?KTmuuZ@-zj5xho)R3^+I{(4!sOO>O3Jk3Y!QWR@2)Kb0mtV`kW_Upi0c3obu zUkMlCHKHr zbovY0C~`e3TS^tl$ag$^PIIzXcfx0EDyLM=9u|Q9M@QjG2GJ;1d2tg+xYH-nJlVe_O!XMulkwTM01L_ds)pQ{)hXF06^D{VGo)isVS z6!$Nl42F|%_qkIr zVEvUQ@k|vpB-Vjoj2x)0WoJNVAkCH0Z2La?$Z)%8cmJi{$KkTr!~zemKx)mZ2g?jU z!aMlL6Td5QubZ1{f7C?4gR8naY~3B*_Uw$DsY#y4dt6PuDpW}CRO+f~yK#m^DFxrL zu-+D_Iej!Inc#8O;64sI?dYo|g+r$Q5%G11YQGK8?46o64|g%M7l*54lTecVSb1r= zv8bYTCAhP_G73xfSk?qR$`Y;Ms>Gox^=qowQ|Ik>JkIDSo->=jV7`-B5;2MrsbOR4 z{k_!ux;Ldi<7&3sXOE>(jfwcr&%nalrB40(8{NOpBgG6r$r$!<*snd1S#$-PiR)iT zEntW{;yR`>*G@{9We*)bBh#u%ds8z~#C4h_5j*X%g&>#KpRoQ25K(kS_;4)@qx>TPeq`CPiSxBtL#mcBqM5msmn3$on z{bbug+6jW6q+*6Nk$LT}u_`=~COkAvkq7gJ$!mYZmP4Lx(kIaPcWj~%-N<6pMxt>j%9pTaVOO7*i1;?9b{SPiQx>6vYuh5}odIZ7M=oB&JQ>%p zu_BMxQu523>@i`wOwY`2~JH9Fr!d+UApf^ zG%3gYn#-^_;00z27TD<{gf2W z;1eE5^PstM>f-*_8VQYKTbOn!nb<#~R+YV2LC8-|_nvAGx10j4P?0~**_R@+{vsMV zUQBLwH7ev319uzgP923A6RcxjG%Xu0lJ;_0Re&5fHrwYH-)d(zAcj3Rr9!JoJCFJW zdxEU&)`K-0I*eS~yV7d9{N5xlguWR+eCfAF)L~a-%~vFdWc%+i;_NYocE56-#%%<4 zf{>e)n9XHdcIco%RZ91y=t&Zsq#O?Dn^yluqpv>fBJU>gt2DdC%tB%Ur7Eblih?b; z`L*6+5(;k~M+%8@e^4m~l`2&z43vWSRM1Z&n5ELoD7jjr@1y zQ!iErT%^WCSY351D@o-|H*6nRLISl~+}HzwvcZ^&rQgvN|>h0<;NBXHPJ$= zx3PIHVr%1MeDm^4*;8Kz#EaiN60qGM#k;uwei_mOsS#K!$}`?5?n{ z)rg#8!lZO!p#G0Qi|m|yverK$cNOe^&fp&0_y1^e1SK@&ax1A3IS;FUpv0Fe^(7u9 z6AYeJ`~2q-NXDE~EmUpLV$7rKjJFEViNjPsVla=6mo+WAXGl%T{^zay042=kUbw{t zcA_kzvU63+d@8MUR+JtTdv!o@_Mxp2BjKM6+%PpOr!0`Pq)?m^5aD#SEBr!yS;HrH z)dt6sErew}8J`|Qaa9C8qiz7n!K&W4eXK_23`L!H*d#o4x|!YD=`k-YloW7bZi3Q? zb(zORkMlLS)e_hPDbCSCD*B{2>W$JpV^k>ygbyj)PV!H%u!?LPe%P@WYy_h%-80(= zwDg)5$|f40>9#U1T4#1YdT7(3L07aN-he53Cc6A4Gc^dws1!FrR@=b57I-V-M)K%B znPE3Rm%NvzYmX|^b*Jc!0|1VIyFc4ypd-rVFv^VePKT~o2^k0$u|RDFAIW_jMrljCawg**Qb1qhlU*6~&v&d@eHx-0! z$JJhlvf2fh5-@Zrc0NrDBJJs95Zx}A?%SerUVFthgwCLJWY zO6Q-LOZQ%5EoHUpeVm*d;QAhhP9aE6s@-a{C8(*aQye|P&1cfdH~y) zYN8l6XNE%RtBkx?i~Dh+4w&SIG8Jjcr_5~OEC<@JKotdl+}5MtiyE^q#w|OyL|~>{ zWU4^F99*|=%GN;azWcwo+e;*+A69;+bEmNv()^nhUT9B~kozzw;koGJ)e4zxodntZ zzP;%2iv=V4`aOBud#qGwNdXjw)81gWB!3sJz-(g7e`W$3q{L}r%quW1O#+;u~ zn7`CaK;kOq{#T|xP5#oo8-W(40)kNGtrnv|dv?^O2*A4rfM7O@wh6GZRXsJ2K|C7y zTI8=}(jE*gr1x2QWz6;V5N9NnpFtquUXNGJ{q!{o`Jex%37d9&>dp)jzMfzz=+{?_ zBvx}xO4=%_Rpg*c8ab)hVO0ah?H8IM1+5!UiR{$S>`pR-Y<=}DwYv{YcqKFnjg;wOlG+9+iKKJ~O+G^gp6D5iR++Z!8k4t#p@yQ#z`jVYwh8UT0!Lf zgeaN)YRw9mD@*y)Nawq4mEkZtnY%@pfhM`D)&3(Smvv2(E!E#wlVL#YT@AlKzs$b! 
z%IY$)7!yTL$lAA05-bSrbByiX9@2yePksxI$0AE2DljwOBiq?h)`G1XIEd;mBEM7h z(J?~n7fnJB14!Q^2P{IX-nZz??w=1)K+I#Cv59gIsdKdMKlL}fXq23S&Hf`IrvE_s zmeP5DMQy9^QbB$X^08I_`{PjW2N2RO#W`n<7B{}V)e(adtG`}|poc^swa2sXVfp7W zzm(g4rdX{2(z|dL28eKuN~XJ5x>YPH03UH^h{xUWob~|HP4fHxr1}0?G*bXBc|XgC zJonL|&2&xfS-!x@!@EMFk-p=lRmuTc;Sb=XofV97iyZqWCl;&MIXWZkex-c2lF|a| z`XzEUhdSE;4wAa<&NB37V|!hcJ2M`sr4~N()Ee|;pcYdMFvbM2X&yrKWxk~Zkb4yt z#2bFZyK3%rdD<7e@1N{o2+YX?ZuBzunq5l1V0Df5PWr1OB=FQ2)f?@fB1oVtTZVnM zm2Y5>lvS?(nWh@UWbf1dMKIrW(jQ)B>XTVU=5`^79qRj1qesimFP7nO8OgH@4!EJHMondV0bvosr zgfa`N4d|Vv9OHjP0x@OGM`CGI3z)AOfexsVhN-!_Q@gT0S(y{yJ13eWC>{r}c!91O z?UN%Vj(h%nmhVLs_or^3#i_J9*-T8O=~lr<&7dAM9V-HG0m@eN=OkaH{6HILl386K z81TS>f*crWE^NjcOm!hTTfY=-&2J)mfgnn)dqIHE|8$k5oFMZx$9!ESa*eb1*<~}* z1^gpY{thFF#YMS=-?Cd<|F(D>eJ~PNRY=A`mQ^QE_ReVZZYuA0IR#)i6_~wG9W*?>Te-+f3)vdlnwmGN% zoZ#qyz0av+LmOkj0B$6A$;05yd?6xoj+#R^lE|@I*Wt~ui?e*C>J}(}E;?ndU4K-j zL=H!hxn5K87bZ-bYfc6|Ga;rjJA&YV&oeRZ0B&l|tqwmhX4C`k)#6#fSLu?pP`|>s zb5?jc^m!C6q%A9j7|BFWV~Jn0N~4v;W}G0KQ)mt-`Ke(l^ZI`x9w)fJs#U2-aaw|c z>GV??$JH@%WOHoSD|0e#d1KE4aNf^CyG z6u##D5caC2;QntGRzVpSHV`5b?ZTekpi@El@0zN~;U+pnq(yzd-UQHET zz-MR%?|&Z^j!mgftd_M4fsaQiFx6v${v1?T*L$&Rn+ZiZv+qY4;y41(d9Q;v3|A$7 zj`(yI{}FzZm&mu}m^7}5K`;8Z;wl`FzBYoai=^+ksVk4SDr)AgEok!)teGm~{Pm+N zCc-BU8_ecqcJMoK%WH43I~|}nC4O0f-aldkF;daTn&s`F28(;FoojseTZJ1?;M5a- zDtSA^)$HBtTGxzZO>oE&zt)I^{} zmoXe4>idVrWC1Y2XZ*-2KIzrVR%PwZ$0ip!?9?$K8sd%Nuyuf}Uax850C_3M^gWCD zHhuKQC8nAoD3yY2O_e$E*3zh+H=&g`R;DLwVebu9l18GDy|b4 z3retu=TLqR{*Y;W0eG6&wuuBxp20zZ?vfPAx%QJUK*S`kTKo-t2qcoV0ecg_Zjx@` zi7r#17J!P*S4+>As6f+C^Tl~g;ZkqLmwiTpD!KJOkW&)-m2ZZ=zTtkw{R1GZ@ora@ zy+#2>@87T&s$P@CgK?F5CrB6bd(f!Tx=XT4G8@hl+F-`JWE!wumJ1e7_{f;NiFvGh zvdqa>5~YR%qYz~y&m~bo+9GE+tRqZ)xy}ukQW4ih9ZZgEe@15!f1jTsm*1Tj`DK^z z)(Q~MJvVGL8m&6()nzjjCm|O5Toja|nDExL6Adi_fNYoLHZvYaRjdj(jQP2w#AH+~ z`cf?c42x4|o;Qm9`g&-WFHK?SvG?NfQs|pWOyy|2{SO^ga`IIRyczUPio~eujXrlL z?{vJ|^OVxE+JQ%pqQ6eok9uk%5I76QYE0+1x~tCcN<)j+2G?7fin9wTULk@Hos?X%(z$*}|*c)(op%ja=mPac+1vy94IGjAWC zT^;^!il^abjz~4;1ePM=S$>jEv`15B{lo7ik6odC+swQ3{*)sA5iQj={(Vqmycyam)uOzMH*wttWoSUQ?T4X) zdoL&4-fa=|*QMh1OaJ?X*IS|E#Ie6b^BbO5O&YsSpxmA*NlG$JQZtOufn*QhzI69E z4r!!HgOU^k4^g@V^Cjj*m)<9aZ)>sdWM;_q(IF7xcnkF{QYgsc*~ZBqE-M0wGtv}; zPd~Fo#&f1;S92=7DfHG&ZcxL;ZcnP`9ls-Bpgb9!hij9?S{iYM`&Qvf-%ilyGK`Bjz$MMi#3vBYn8C2Mgmf zD9?ap7cZ4pZzaD?!RiYNAs;?A)q=ih8m{EhD(hlPk(eb5N}&LIwrvZ}L3vYlA$KI@ zMa|6?ED9F;nQ-~mw+qu0_<~sy7Om`OBfvS3ude^|RiVge;b+ELyPTQN38OH1F**ML zhYgtWPAz@n$<4@@TFE9k{d0-8i~dLrS-|%PzrLyx%s&4O>YOxO5wX4oSM7WHMkWUlVtOX=iNV^Cf1VYjyZe>{*cxZ6u$Rx{FCt zEAJrqIOmwwB5|8XWMXf3Ep4^<;b|nxb2Tl``&y zPq`H6>|2Q2la|A$)p(Z5zyEVC=%ALNoaMo0Mf+%A8!#~+?`QKKa%h1JlxXK^KM)HR zOIXmZ@hV*;tr2ayGe6YKf|(0y2F};mJfr;n&GE2Qt)-$osBpw8)V*yiE>p55{LQx9 zFQoUmXp*H_Ol#w-Em4vJ)2pt4H<(MkJ0Inrv1H?K?Hqib;gxTDSI;sUfMMvMOQhWV2NgBOURg>sb=!Kn-tDbNQzA_#| zVYrNp<0-mQR$K4c%P}}hbF+#=g0<`%3$~gFFyhY)aHNARZH^&&_ul>BmeZF4p2@FT z!ro%gz3fznAitMHY}9WaY?mrEiQMZeHv0TY>~h$)wx2_-V~@pg?XQ7f-Y+!Ahq)W4>1%Q z(G5Oj)P1({7M~1>7m3JcOR)v|_ekqn$(6aU$ZztwMqvD7r@dry`XXlAJFhM7 z$L*Lqq*Tsn8CCs84J8k`-@)id!)jAGrBq5&>G`H+B_?VSvhvpYH~NZR`FAdY)yf#z zqFjFh6->>|e|UoE#}^ehXIBl5fB{AU_^xUYJx{jM?eZPWW^SX)$6~kGcrP1DqEPjdE^zwEW>1H_<&qw zEiF1AB%C2urlQsJvHSDETzBKI_&^a=LJt75Ce#nrmtNig>B5FNs!ds}j^I9x5Xtve zNMHZdGzd<=zq#LyTT~G3mR&$n?|3lMA*BmLcY$sm2Y0?RN@CE6&ft z6!VvfVMZOIcgVKm+-#k!nJ0W(Eu1f(7$IJ7)i)S#VF7P~;^cdcHImI@O z`B+*qhUAqQY-nJhn*HmRiov``y1$V`xdffT2&=M358n~Jf)CbS4u-aYkVt0gnxS8_ z&0{KJQzXP@ZXdWCnaCvBj&YbWL*{x3LX?_MYu%?8-+Deok4OISD>W;3IzdV)eXXUP ziF19=alEKmSKW{OM|8DlX4D2-C^5x6X_(zSikFDeuR`7w_Ry>S5u%dwEnNr}d(;3m 
z6mQQH#*yO*Mz2I$Ut2^+8M`hSVKq|CxKd173KR_B&?HEaK8fl<_KSvM zo{9|Jjn^Er`)ZNo@Pu-jo0+;U531|1s58(zlURz7JRaJnLLm1FObXIi6)@5Nhz5Du z-(w;T);>;zIK#ln*7{j5Y044$v<+Ls`0zUl$VO&HB&r+OiC*?Z z!Wuar=Q$5HW>9O=%Rw9d5oMu9zmN_f@Bb#&()8~OpVOH?meZ+mmu;Ge7z@N=*XSc< zR@uLFSzMSL!}IQ&rLK`xy+3@Xl747UuT&?e$zlu6MaV8bW@`!j%IatG2r3^W+HMMJXX1sfVJ^w!<`cxGix>rf0)m-2bmj@@>^6y3;Vwwnk zNcR}ecIvu_5Ihnw89ErDe>{szFRDJ-W8DKuS4BIc?4BjG;H~R2o8qmLKdl$`Mmsa} znsut69ED~AltJ;Zx@gP{ao`nZ`#n<386#^IVUUO>_{<*L{7@)-4b1a-3ol#pkJEAC zW|dUX7XMP*DbbcXXX7_p$|3jmACY9N(J3oT-2plxFLE4!w`G(w-J@wg9pP@E)cPvqnqvJFVZ@BJN08EA=$a$5K!ba1w)04u*kscrf}hHnn6Pfn z0n(tpFv@4tB}**sy+j2KP#ym%BQB);AJMFPQ4dYPp+z2uFgB=YLePtQz+{IMPXJK2 zn_uG!)!Zv)i%IV2pw-6TLZyAx0IYPFEvjwwXk7ern)Ciz=!9Ipd&c7vn5TGXs!FRE z2t)RCz0m>*V?8r3l2_E(s{^G0p*MBOZ(HQ+aOK+=3^vOfeF@f^qRntCrTCyO5bW@J zt>5W^Z_xXm98q<__GKT+YgnumhU2-FvZShQrvR_&_=0DWL?13PPH*Wym?R@!zq@|o zu;{i@2)j*^_3R7G8R}|eU$0s8wQ6WbnjXRhGRuf;w#rtnz@wt;9U%EAAPFWLl#r4< zPu*52XG6=JZfV;oDv%H3g-W_1-i`L^b}!qLl(-z}mO>Y~mg=zw8-W84M#tC{B8r*3 zx%OD&fHP=5N^@^awj+hy^EV-rVdO+HEn#sI|08+@BR)-UIJUAexlMWy zF9>s}*{&)&ye5$*bq|5DJIb0hZjp(RdfIF!GjK~2Lw}f7O_)iqR~_kEJ&bmYinjsp zU(M<_z{Vd_#b;~V_=9rSOYI)M+n^wU<=aZz1z2Zvms@U7jwCOJ86%v}Y>f|X)k;2N z+?AN6xlsFeg?Wa_@2KV^^(UFg(KbWg#Z3*HzY4~Ba(jGE%5kLKu0t!u1=^n1>{9#! z%YBLuLDO?DDSiL@fY@S~ z3n2Kj`M#(+BDT_|uY7*;;RB6kLYaM0PK5h^p!ijxBlCF4R8(0!wtybPoI$cpt95^Z zoped%8H2{1?Ow5jokW+n`aT5C#K@cS}1` z-?6(Doq$k&|l8^Cn^AjmylqMLMQDbmfzP zdsuY?XTBNdwLI^HUyDWP*N$Iu>SMua-po0$G)aB(7`*?7!#My)d=-EuNfcCywD=uw zWD&=WaHi!N4Txs!l`Is~JjZ0%0TO0++RG?JIaGV6@@#tkief>h!>^w!d1;at?Koz6-0ccg!z_oxW_b!o1HXrqSv{Be z$Aq@oI8vFqwIO851i(#re$Mvt2yG#Umirw9hv-{PkST2axW$#5w2% zumS0QvasLudL0Xdor?9x(xQE;gdjO>wKPic@7|I?(z$L6Wc>(x1Iey=C=|qk(KaY^fTa~4OGZO}-u?NWi z`j;l^Xm&Ys`Y?4-?*xXDT;}tUl(On~J|V2P$lnPW*KnKB7;8T2Cz#5=AIclZ#W$ zEpv2Jni7;SY`2U^=|gSD&1?{xP+BYUE~eY?dY;AOf?rOibE`}f&lreKpDv`PoiMq7 z$RrQ!`QoAtHR49|cMBr5Xx|@J|D4b&mi8Q-QGxq>^lzZ7MZ_ZwCF0dAmXyQhX`l?k z-O8_R?>iX$lCB`EV=5GCU=#| z>tQ&&n`3U3ZRZp6zg6#F%-IHo0(=|5JH0k6Rt8+Mwn|H!FZ44hS=Mx-oW5F798k`Z z57{m+{V>JGoZW7ej;Is6RJc2{_D~M3p_fB$gJ;@lk=p4Uy4|VT719S4?5_)g1Afz> zOm^z*0IBZLvWUR_@v3z9}bJoi}+>rsD6kl(mC9*N-+F|TvZt;-}~ zu^Hs|y(^47F`cp3Z(9fxyVh~}CB5D)-TJi-xg^P43;ftP%v*qI^|FwvCQ$u?s0$uZ zmojv~zK&A)m@a}?2izLyeFG*Z3@Q$3fGl{3p7xLu&^3`58@>7y0dc@{>X~c$%}^>( zzjFCUge6@ip&oU7tD5_)fp908uriB1jMKw+R<|1(-Htrz%^9bHiSrXuo4Y05+dW~Y zZzSH?OGXpTt)xaYg6^JWLFvEz4r~oKbC#6n?)EAOLQjmiq=9K zeUj^W^!ndPq*JNf#%|uBDflhY-NlERxG39wB%7>u+w^!n9hP@tocYBPt>fxX6XfLn z^3*xX{qBU8B>8`K&f|8A1&O3Ec<$4{+Jiu?g{}urK1617ER2(Dhu&|c3;HT$mOl}p zI;ZvW9W97|Oy%&?FM5mCe?-V0zIdFID7Ee5N=QeechOOfP+%&uOS>yj^T7c#NjKr) zp^mig27(`MXyMmhNu=ic@*#p~`_@mMO}#ye+D1{9$O@+g%Q-5?KcPX5=D?_d62@L1RRoE55L~i8nc6M+9&}&g|(LWeNDwLA#6cPaT2 z32vkoQ1uIoIepk(s5?^-t3Gqch-KOT>m=&g`aUTdBe}~)<_~d+WuZx;cag+w1uPqc z8h(OLXp$wptCO&{j@29EmCM$4JE7SvW$6;@xNS}LlsNrt`#85~@J*&rVIbUWk>bXl zK010aR3)NOZ+gDf_;I_{&?EO1H^8x#j=NSbONPX4GFAcG!_Juge*G|m=&$f#_ns^*0tYNj zFCcP(ATT9DAom21mm(jDhW7t-uJIH7bW($rJ-|Gx3IcH`_SJz#{}DZ1@*;=v0^F%r z-ImUdZhl8O1ssD^Ea|8lwN4$7(z6YDYJ@=JJ2Bg~E{N%HL@sJOS}>2nc#_N=?TDnn z#H>g~ze|d7doSy$noaM~$#EE;n=uknbK9f+N961bS#8%&S)m2d|{cU2038nyFB3bI^6>SQq zRE3{Foa>$G-NC+Igt;eYMwf;S-pQX?ImDZM*7o-=s!m^yuPxDobRl_Bv>LGlrD_f4 z{|4tw&o%U=8f5JK1jgG+?zgj#D7GVvZc zlCDb$HAS<>aQffNv?sXV1!+gMAYIz^ns5YoPWg-++{TQsvQO!sq+S4yJTuBxI}!O9 zX1K6dxY+)*MAS6o&b*b@VP$ShI3^Huah1oUdtUag24=<9QD65omY$imw!0>46eSdWDam9+CARiu06Q=^0*5kf zm1S?xphQ4v?nBYA@oBiHGwQ^u`VF&DBP#*1ns+2tqti%U%2AIlJa3$)VBmRGU)GRw znra{EeRPDwf#R6g%wS zBPBm~BT(xFxVe{Gu&<6MalcN-lzO-9&sHU>sDUU5PbYQKQG=m8_>!ruoR8To2dtu} 
zUc{dQ3}j*tD3g%>*1v7ekiHVKO4&}ld4Vtwy4xv|)7&828WP&!aUeJt>&v(pQ(O3g z?HHLhBQ?Q5XY3Z4&`@xjYnWsqh0ZWef>L6ct9m`eb>-GAHocbOi3bv=Ht|aFU$d6z zp3<~#1Ja>duO?V5JRONA=mM-!~RW!9-b zX5^;@6TJXUUU~ISM0W)sUn?(whP+S-s`K3X3DMh4!v40NIzab?L|IRFYU0EjIxE`a zY=1is5+mMG1OW4<%B!a2D{Vighm$9Y-oLVZ@}nOHvv^8mZAK{rc|x`~3dj z^WM7~=bpIdaSU}E5vvtht0on$g>MAOw5@a_Lm(norsS0wej7AYhSBZLF{}z8j^yMM zmJ;ZRhXanGQN3{OLoI+%^&Sv=0l9=e0uORk>q(<^R`(4IF$q#9ex`9GdE67sPJG3+f!?^%9?fvTY`KI^Iw5tnrEW9weBEbL|eB_pZnCUleRSs*jz{HQ)up z<5szbRu$V_O6yA7E8c^4i`FP_U09}=gmR7@W9B(LnSa{rm=^LxBa%JUrlCvJ+}keZ z%lyk4Be~zTe9Xm^pP*sH_%)-gyTEYAOG8z^{87Sb-A1&I8}&CqCBZDPqBuqQkCQig z7~cBW&@h<>$EdCe$_iMOq8=n_99m1x(MMccg^Tnn6T8}QXi*w=XuPgQHS|6Nm?Dlc ztpCQ1$xORM6yOuYPtXG$xF0yUSs-q1mjFb|s_Cbbs7@a6e<<7K`-mdG#a-DbUFW4Y zbcWtr5D4LTEM$4kJG*iCvC@8b8t~DjVtpMZ2-&UQ!M@j}Y@8+D+PR689YZUAmw}<~ z4cnArf#?{UE0Vr?r~$b#uff)Om6#D6ZfQPG^DdOyds?nm%lY0M63tR7=KYhYjsQKM z(>JYtHa~aYheD z`EkM)r@sxn-_fXmWa%!Ddb)cUM&zL)GoqWchHHhx*Kp+OzW&WqHvX=#w~R_F1B>n> zn=$yX3$dmz3$j&Iy04sryjGcJg02~U?3DYqM7ue`{JT;J$l251p$q#7Z~QiUf##=XCI>*JXJ*$M~Enngp5&8k;N$Tyj#uSmYV*ilD5(olLq|{<%nQ(0+aF2 z?D*i;Uj1!Qid|r)_HwFTFJM8xb}Ulkr&2{*n?Hprf&!By%_`DSPsaDOY~EgGkH;rn zJ|!MO{plB!SQ2;KSg${P_SJ9F2BEpi8P!B`Uj>5Z_G-ZSr8Yzy4|5z;e&?kV_NQ&R zqF;*rX4tm!PCQ*F|#0- z{Fe(MHdclfCg-j5{=OvD6L8JpGIx;ob1Tl!c>YVws3%7{{z_FvWv}n+pZE=QJS+n< zA11ux>SL1-MFBr5g@kpnsvE>^S6^wH$53vcTg`@i>Ybz1V5cdsz)csuptBD>CAkfh#3NKS+%vy!I%&VWTIo^ieM~l$b z=vF%&74YG3eSiI+y2_kQp4=$Por4n)9FM@~w*EqUP_pzTd4fs;_6R0SD$-tV6zU>x z4?|NIF^4SM1S0}uvycyeQIkE}k?DLtr6xP0^svH1K;Y%Xl>sCgsS?-h9IGD&lZj{r za-cW43KgB}XjZ~B zuCBCCgLb_Wh+I@G6YHq>qgoz4u83$gBvYqwQHH%q3!o_S6`~M?j&ag)R5Q4h@ma1Z zOYMjQcCf~I1Gf|;BHAXETd~n2vV=s(!W8M2x)yZE`AZ3UwQQ?7DF0EkC)$%OkSG$1 zkNqHSW4Q)h>EBlofu73yp9ZjoKAopbR2;Z367ypDdPM7%X-$HikzOI~?l5JlXS*=> z|4^WCl7swjG4+rKfR9`uSv6AK=61W&woYkq(+W_QfQ`*g>u<@nPYB+2nIt^YINm<} z6hYR!X}yL=nGP+6Q@rs8RFhHZ3tw<55iyL-Rf2p9D&Bo8?*LHa@|=8PwvqN8w9_NO zlJ3nPwETeL1T)=hO-m7atOSXQeDQXRu5gnhNsir_Fy6oqR)owmZ|!ds?5OZNq2;lu zblJqiFu~#Ca57nQyZZwcx5j{#O|3AH@q&9Ai%UiAcLj{kD)A)OX0%b+iGAwEct~)~ z%J-xdUWcu|bCuGLzZ7;eC151+Y1rxAYK7(YYAw&&NILA?Gg6@Qb3H;rV?t*Nf)+08 zv`@#|6kxO-0#ZU6xJg+JuGZ*aQChC#Pls)sTdZCL$5ER#dXYDD@`s(pWZC$e};qoI33h#FIoX_w%lJ}`G z%s4nd(esKhZ`SnHc{(N6b@Cl8PLcWey8lSxa5FiAw9(Br&m!H9V(>oV#Z58^4enEH z7L@5WxI)Wczm`bzuClBdV2VTWO=7*7|VyG3yO-kUU?TbeJ;zzObE^467a1ED{h%Xi;O|Y}9OHCffch@lNI?J?*cN}87 zN%?X#&(*(38AsQdqQ)fjdnFd!s97Lm`<^(qH!Kioz9N!CMa)vg__okQ^E}{FYAD)0 zFBNPZH2!_B*^C85Ldy4zK}ZHt>Qa+kiAoK0IjdzTxh3Y?g*}-Q7!39+)hH`h0CVLO^hginl4nJ z{w)?iw)EE#o2*%WS%lXn-o{5YE)`o7J>y?@+*OFx17Et%k;5B znc;TpRLJ0HGsu;OPulBt-!*x!QQ?+w2kq?@>la3pZ4FQP+&x@~xjm6%`fX52Ys(D8 z`Lk0*{qPi4LPX@{0x|-zrjEa2R7F-pSx#;vz>8-;^Py3N_9~Kq+z_qGo9-YIm5EIS z3UMc(z;Y3gw8XccBdc1&%lJ|W6LtzoDKmL;`!=_>9Cq&1|z4I0rihQ13-DdZT+~m8> z#6J?*S8(g0R7tqaUJkNeE%$u=Q-$1x=Ib2t+Mn)$-HuVU0wAA-k0jz)mHccb+B*Kg z2TmzFWS$qqe3UZt0dUWZ+B1$jQOvv$JmkC}^A%aJZmK<<)~S`=8kzy4IGv5;F%8B(xj!JzLK_S=GQK;)0{{v2A`!_lyX8T7u;bNJ1R42QRzH5 z?XebTg6GO2*Fk--WMj6fYYWjlMqTc_g>PGoR5xQ4J5yQ zY1Z0Xz_EYtw3K+SQER#H6@gnq3rJpZP(T{TTl;u2>1!+J0*)`$n>T7OTyw=CYjyob zadE^9tlGu$)w}jOy#=!N)ysz(*}pJ^my&db`e#$kF-p@AZ`90P$N+Yu)EgCo&!HfI zK3Ia#fxxzd&GD%wXx<){fEfrNkFG@~z$&*sXDGcp(^ddJCRhSM+WUzl$l$sYPc$ z>Hok0HSw?q4|PE@Z$ie(8Ola(u}J)1Uy8=@IOiz_NLy95bEJh-vKM~p7U`&8>)*gg z5Z=0(#xHVNs_YlAhPWhRyR8`%>bW|dtM~u(*g!HQQTo}sZFC+3|MX5Z-B3NDi9o)J zvZ>2ckQ#V(UZsv<%Ar-a!^EM1Wi^V{T5$`dqI(xq1(7%=7D2?mZf4hx%@5y>?Mh`! 
z*I{_$Y51WWW)-3Mtx4MM?2FBp>;Vslgvl%d5sBAyTS&c6ILuuX7o__RAPCfys1%ix zz(m=^8q4KTL1j^^BtO-Galey^K{vu8a|v|b_G4zMSB22)~09^ z>s=J(dYZ^en0Hz$d+il8vehiv5r*VOVfX1qxmy~${eOi{C*=M%doQNOw~V)`NfuxB z14_6l(AL0hvXL46`c6WpU>Z!2jSIQfZY!vG*lj;DAdKA@`W?1SpCMxM0)xGVjm-D~ zTjhbB@z2w*Wj5|KleWnGU~h8MS2s}9JsT@!MBV*TxWgH;EQ4;f0$FmcZO3FI|VZ%R^ul3nJdOqQ=gCM{x|a;`J18l6UyM_U z={y+L`EsD}9}3_ja1V;Dz)9z!No88fm~AUfK}%M??9d=}I`dAM+%SMyeRt%`ZE1+i zn(|=%FPt8s?B)+YW~$=WKPXOBMUO-CY-xSVlooWAb-r3Oy^O~kazGHRp#=swcFq)- zATKSLU81Z>Mrl;mmqw8B1O=^2m8s+E9+aaxa4~+(O;lU=pk^FLWWn7~BLWrGQ^y4< zW-3ofH}^R#7|7}C;h1O^{Tavtb#m1}Acdm$K;$8c_S9<@-@+jFHDAM|CO#vtbp)Y>h2Yt5sBt<8ir;B7!8V{x zZWp{1Buh{@itY)0{YLKoDy)nWp_o1i0?kJ#{vu>i9MzifJRRw%$xN&NBAu=Ll%ds= z(J!TXr!6g{6Q_rKw(eEiD(v-YqY^SRTqSlsHnAenxMj;+NkivZ6_512!ubERzSZ+F zS#aN9!+xjy4*L%DdYr#fU!-%*QL*-u4GbtrQKA5hkxVzIgRko&I1i(K)PGH~izsr( z{98By(`{2@v-MsQk}T7pfO{Cb!H5*u>1IBhSjq^rDpQ%ed%r6 zn5Y6of#mzYE8;fwY6XEDsYiBkF(=V(xtN1p;)Yscu`i=&1wWZ4R!sB6Y80acq=H;3 z99pw@t|{aR_sS`%MuQJ_we-g|^~8V`DEB+M05&s_a&~|IT9%MorGkM)PFh5J#qcY+ zxoGQhveQ>@Dspf!m)LrDf2~f@t3=7-&O&jdJ!;}fKUFCdL3h3P+AURKRmJ9dN(V;Y z6{%Z!<{F=%XuKy3G-s1U6K7u%ZN2ew)Ys&Hgtl?6VHyLdS-{=?Jl}X1yrE?``t(~y z;}D$|Bg-iSi|eR!VjhtXNK2vhFzje`0~HrLgzvf^VI|jWmhFL?5PuZijfjV3Yh_5g zOH?ISfeiZ>Km*>eg%F7MIBc4;Zw+k5Cd^0@5fA&iMa!%{1Xg^kSRmY_R{2}Cu^wL@ zKU=!Bh`0k8!R|bEqi`t(&Q@4Kt0EllRA0sj+wa`DugCRskGKxn56ipB{Y0UhnoU2+Tz3LvZ84%X&N# zm1pL7JQPFQ8)qvT@8DjJrd} zehDM9SrE;C9x-p5_vMM{L`o+%*@F@Xu3? zoMJxo9}1FI1RzGziO`V1A}l0Z`u`A+C`ee5thS{)aTs%2ab4dz-^|v9jMo28Btn&D zxsw}>>E)Ua0^?9WW0y(2a=*g}eocHMsVPVNAIeU5O4UWr9dLR`CF4oCbO1Qd)Tcu- zbWR<}%owx9ERzA;lq5pxdOK2ym1Y^pr$b=}K@M-Msh^blA~wHW8(51)ERo^fz`z5% z^5=Ru#4)fg3`rdSrJ9g!xXO$2He6A8{*Cq1Alt~MHTB3A%LTKlKfb|OkKy51mp;i8 z-Z_2Uode_dBL55k=mEPo5CptCx`#?%)F}%@z2?sV=po5g6ytY1<|RyokrA{|)25_4K@nZm41N;twfunNG z9I+WwA|QNiZcMs^U+3}in%Ezs!k;nYNT(Z+fegcOt04P#;->eci49y6v@qEV9fZIiOh1ukcasS;=7dRUp4-9iO6j+b4?S1V$GEFVk6z# zA<0HnoN`|H3OpEQ_7ztShgx8F{F)@V6#hb>l0p5z&w4N{)pCKFGNOZ=kBilwNJ*k7 zc&Wh?h$t+UG3<#H%H|KBrU_2SI^eVDi@?68TKo^?vNQ;b+A@rk|E5PCI6d;eT{&~- zVqP|W1CJx8{tpFHeqLDPx0ovk%7oTUQz&~N={kQciDg0)(%bV;H9deY)f5C!o_$vQ zU$07}dO_yNqn{=bEH`R;kPsBy6BTm>fw~*)e#SVaP;{b5aPc`)Kayxf5CWu5OBtLV z&w5fWx_%Q9>F;l&DL)NyoCa|+$#Lb#dSz8EXFOO2lS;)M))$1{k!^-WAig}nUPQ2w z3y+-2XzZC6x(I|t9I134E(RO&WW`8p-LR0+i@wrS_kiKIdAv~EfJ+WNy0GWS>G`hV ziXk*M#mLBd1Mgk-1HaW!__N&qrfH)nV&0l5Ia6EQU+8dP_E~^S3pPgLapM>t2F6=6GXrIh)AU3p=C{-w_a+Pxlu z1eXqE%~4J)y`fwf*|AFIGI&x^;i$7h@`Ermw<^tF;YjLW$SU5vl0uo|=#{&}9_9;F zNAC9!;vxoFG^YEx>BwJGKu5l|$P32g$F0`-XWnVcgsdaiO?0N@t%J3ZY2;EpAZ14v zPL~`T+iT#0#|s)dCTB`7QH9kt)2oeMJk=W}-MSdQkfVUedPr7Wd>*xkWeQ)S=hCLt z6QOnrbo?$9DTPFMr~RX)?`oOojv`4%y&Btz;pIP^3|(c!5}B{5196cBBM0UgvxyX# z9&)yA%GYc(8Zh6{1h%;E85cBrUAI1=l3P7fTkVc#9T=pssoc4k^n{vU<(RdcM~!D; z@ftH}RZOU`LD;P0l)#&nW=J&u_1t-S&q0nF@+ER*qTYl&S-rX4cF^Xxl|)UDB9jtw zzw7Kiyg|xB%<&i6O#ahMHfeOo9U~G;h9f_n=7GyOVdx}`&ZISdP>4kwXmm5#!$Fu3 zDu6M1tM1Rmf57yR{lv$QEE~acbmKL$xo{*^`T;3?kmDQ6Alre*WoCw3ty=x83_CDA zpG`dISJISkVEoxnR>#fF4V-4Ti2X6g8=Y4+Fu!1t0RR_k*<|L8>r^%yud`mZDJ53u2*A!fR2A~X6Np=VQ_M>* zV-3t3ABt*=OFpwJmaFg06U*lJ56qPQ&<~QF+t7Jpz9m`&IVH?Hyhz^?6{qymV3hux z7y@0}>>&MJgY`=X#A!E%59AL==Om~H+IN~-)ow9G!w!q|UN!#BA&_(FRH~(UPs`!l#XwQmTi+UK4v@zb0 zGZ--qnV(C5g_QFNc6 z-{*dP!edEJV3a1MfTNLje9e@MPsx$|Rr zm7vV9`!$qhww#a84&@>qMjM3#@h#T-4L2ka3;BSEUEZ z4NfGpPI%uQg6R+LO_O4E!Tb*;tABuNZOpK0gDpP#_V!;=g?O-C51TBE@Cnx>2&tQ3uw-o&cbFzumwEmg$Y*UZ zmq|kNkho1K@-%x<0A;8;)OX|Foy}i=G;2DN58G`R!1@6zzAyT#pIj)(5@L!|;=Ze{ zM#7{oWHufmIy2J!5{3i+cz_!SKx52&X6Iq~_k&6FOG!S?sk{7!5KbyBc?cqiI#nU1 zazz@wy7XiHK75JzLtN?2Li|l-;);>>U?(JN-k{$!x*PS)uy>I5&XHNh#eAhsQ6M`2 
z+^5r}pTj--T3-=S2h?qU2F}Z};63r}h@5i@^_`(=iBZ8xpSG#(f@~@23m=(672*z| zNRm036bbg&)vfMI8)$mZK z%wMm$8u0G@zd{_$<;TZ}g>XiPXLSUneWPlsf)B?lk1?^0rO{mw%4PK9F@ zEITfEFux!c-0Z1ft>&>#ht@XqdRxyAG+k&Lq`-ORyHVt}@sE!wx*_TZ+|j%(1|mk^ z+zhE)>k&QOyylE@{8Q~I_tr>Cr&C31_pwzS^BR}w-|X%a=tu{u5_hQ6<-qr(=2Ka` z?Ha_ayzZKOCQzd6Y_`=a#Re`@<(ah5-LKFq-Pq20T#cco{}Q#gu0MmUxQR4wgLLK2c_SJi*Ttj%$V6& zQ8vDX2?*sVq@GG#GnVkik+63G=Yuq5{G;6yr8=)+<-uW*QZ(?&U|h_Wq{~c^i@*N8 zDv|IfU&JH+wCqmnTy5H)Mw5@6x)GIiPIO^h@?kd}j4=oDY?fLFPL=y8(C13-^RR<0Wtu3aO2 zi8U!|;T%Akyiuw;R*2q(^4a4>Dma1xSr3(w?b;gV>`+~@c4$tkka0m`7IDA`EB`=z z&4l@+36+sSUb({%o452MF-64xXQ>?gZ&(@hl`S8>G9_cnQXs{T>e7bP zAgv)38!g|r7Se;AD()wbg_!>{Q~tk>fb*OH;X2c9armATRe2G;Xe8Xof8kR4tS_tV zIb~-?>8C$Lb9;Oz7#k_*x{LgAkYC9XMc(}$yZ)axhHOPwC*7&t-juaLM^a}Z&Ci5_ zHyzMEwAEuKz5Gc2FPiex(b50@(f{>#&0yZ^y2BQz@ghj;D^?E4f-Kxun$dLxJkJN| zd0)HP#ts6ycQWlQLq3hI+)y_`>G{pN-w-hWOI)8~*NoHLvX)y+uAW{e7(+H#@sM%; z1*yl={jcqA@oPI}{495wSMh8)OZJ6Ai>T=N&aU=~nm)RR*t?g%9|!1gPdw;*MQf8^ zn*MJzcZ>zCqFHXTQ|_YQi+}aHW8GDL6R?=mgpw#;2eFq;&X=(9{D%@Yw>WGbdDi+3 zzbsrCt6c3VKKLuvlEVwDh4w_I+0emKs~OlmT4hn%m;B&Flpq}SGy=j z_Ww}AON3Og26$#`g4FtU8f+Ko6}^r&O5u9QwJ&+y?h3;+HUp&XI9-%(7Zc+2mq*^) zfDfBOkV^#If=4b$)B;X5&C+6rwZF$|QE7S9ZEkRC{oT>KepAIkxX<^a{1>{`iyU0g zqn5=D0XYIyE2_=_;bWhs65`BC2yl4x4-R6*^+NwsOW=A~Du*oM@aA{w_k)Zy7`5+T_k(=0F=|qQ1mv{oVIR z8yEtuxq~r7%jbd_idfLY_2BaE401Hq^zesh{UxN?iTqzhCL_3l%et(f75)Qyjp~6% z>Gpj2-|absMRpG=o8p)u1S&W1q-=_58h3ap>*l*^yGR-V03DmLXaV5GO5@K z#b=)7DNgI3sB57K>#%5akuhfvteoHpvpm+7uNK1_9r{np?(aP!vv!YB7nO4GhuED6 zz=S`4LV)lnX)wCtBrk~AM$v9?QDh3t?!Vsqzcnq|M2%_z@AHMW3^-cRbo(~Mn zY*SZL#h!|RHD*VtR(M)&jM;kPyBzf3v0Q5OnAs;tfx0<^<#5_P2YAu?P@auD;ZqBb zQ+vt{)9KK>{w8zuYYGD=1l<^5IDf}?>F*`E7s{Im5UjJW#{URw zYsRT5$j0?e$?7#cbeT6L>;@j)#by1Y2U9h(<=8tUm4pDFCB4hLWfx#6H*IxaPi!zC zRN*yxL^9?Wa@VA>7-J4tHA**Ce)q6$MS9S~GskolduK7- zpV>zMtV$uK`q1`$`uLJdEd2w8Hv{(XM8QPKhu0>Mk7r}9Xj%MfTPXA^!md1d#0))23G&_mU8s!Ee0LF zGPq}>O6=X_jhMxGJf!}#n+L@-ZZIR*9to|(2s@dVi@t9mK@B~^F?@z)+M#t zgUt<2URK92x8LU2U-BFp&~Ui|L9hLVL{*gCUizI0 zbfKUdj&DoM$ne`5pm5cY7;?L2;+N5>dXl2oi+Je3iwWp&1opJxu8LH95@H$WSop_x z)#*xLgQLE=hNsY0WG%rz9F)SzX6Nt!_Gm_^3pBlp)ll?`a3E~zu>$S=CdA&hMs<&G zk$Ow8VRBX@GqGHf7H72XF!8Pu|LNIvuJl(K%+K8Eg>j?zGhsNUeAG;*_kSq0(^c<| ztF4;4-PhR;yFm#D=ITmC>HOm&S>@6dGU{k$Dt9%5lw_UnZ1`HQX}k*>(!JyJv4gXN z(J>!9vHp~F26iIj(`1F~M2acoZr^i0*4IACtW&qgWeeskpNreim5u1eAJP3PG@vo* zu`+Acmf#R8zRd!+CK)e8Lyo2JD^n7~dz}XjqZ8GVY!6lQ9smpwYV|I>U6!($svFp&eUwKtnFv*T1l1DfJBalE!slX8CgQZ0Oek^-{2emt&fp?l6PG5 z?9H7pUGkeV8po!z5W7_>LU41(Na_siy*c03A!fFGPZ?gTzsZRWvhbh0mxrbc#7?{A zBZSJv??k(uu($Y^DbQ!EO6_pf#CD&?l!)G9yK~3nO2SzPo;W8vD@{V#2Leikv~p|N z?Fy290s8d0Ok?e07&uTlRigb3;T+u;O}eW_W5*i@s-sI7XGT% z)=^}EjPAPB3_}IpUed`mRSzCJL{`9@_X#&}xy1KZWn1>5Q$yU+y2p(+;O%R-A?pIh zu3unXj=uFEcZuV1V|e-b?h!+Gt?f9`+)R%Bpn@r(LAZiW`P=%$?VwXJyWQ^snRA~9 zSwT?@C(#wj;0hbRc*tGm57GK;=YZ8AoPX~pm1!s1u$#^O$_)<`h_eri}}R`GBn0KKrd!LYyU@Mxd2Wq|uKD)K%_QO{$p zEiDKZs|p?u!+mCgx?Dd6K&uYdGwb^|{b>W=8ZCVeP~?_L#_ibWQ|vW{?re18V4)hE zE*!Qktm8j8m`(WSmqih*Tx?qeF zFMLR3(6KU^@JmloMk4MJZ{!~&bjS(+P2k}Uk@^ErkbWU}2LRn<^Kz;H{f57MLDFpoQLj`(Qi}2hN15h$j}luQOd@ z^g-ifI{4|_)X5;IL^$<&2YrjLfN1e8jxxO*!Jhuu$fwZu0nRZW$QL^6*wt?z(VqHd z%C2-1=J@5>OwoJ5gLQ2sFLw!JKk{-$wk6qo7}-?>lfHk}*|rBdbwO!1Bvolv17@@i zYj1dRa^H(6fXeW<)|X?b#7TgjX9ex69q#3s$6^n%CJ4kgMN=I1fpz^=?Aq}q`*zG_ zXEM$bA?r%#y~06g{_6qHKP$Sl6p77J$!vHA!e$IfzR%l`EtS@cUwwxC2Upxqq#aVO zoxYU59*AKUp3Rn-#vj{s(=xM{+q=}5m_5d~$q9t(EEE_6$#D~VH4U!8eSB~Hn1~as zbpdLB&GX)+R#_#Z3c9+azKi=qHi$)V7q+JKpr)R%Oh< zRjtO$P+@yJMU;9*i}sAWTJ*zxmU*&5+9eJChkIbyhBByK4bP=b159be?06kxg3aBX z$gznJjaKje4RN_qkm=Zt{Oi8{=~dQeKZyquo;JX@Ds9!TOHTf`KV`FL-u;XGNfqz< 
zoIS{vF!<(hu9Q^oJgDr@<-Teg!g1^gIA*_s+jz5xTEgu!l%kVvPq-1S{5LH;U$HWL zrs*D+*4dKK)gEQIoBSt{>7Z--YgXNVB9iI23h=IUb&jYy0G|{@WGlFMBkRZG1_LBA zbo|1DNctDrPm> zS?$HV!zI-q;VC?z%z<7++dc@poMBmB5_W#m8lly`1+xNqjR>6VEOS+Gd+~@BpRJgk zCu8iHM<+{`Yj}B#fK3Tm$2FwtW+?#V@W5~@L(J2Pq?Mgn!Oa3Zh$A_&K;D7^xhTazd5Xo zi2FDSqmvl>Akq8=7tDxiIQ9Z%SsW4&REu^LAxe$^jZr*A7THe!X3Yc~wy3&!#uxsh zJe;=DC$Wsm6yG~JzH^zz7Io=_Dxb9@cp)XP2+X|jvd5qmmbSE~2V&$Qtm##u*?)+Z zL^>)`Q}H7@74zE6SvdN`mXn`){vN9DCoC6yyAs+ZVs?Li?2{}{L&tB#-sj==2%nbN z7NV(v*0}r0Eg(2_bdDc~=&LfA;Czv&z4UT9u|KqTCZ(d$T)X6^R~;jMlB8$~%<1Mb z-<*alJCs!vHIb50haQ&<_+>m+lQWH&Lp&2e*zKG4}X z;nv;r-bTftEEzmmM~%hNX+X)uWLvmbnnssg?8W2u$HG)O6mY&utZmD~c%LnTul+Q_ zz=JtoY>a5nE8e3RZmW>cKg?|4qBW^g1>2M}h|n|7d9`PkZ#Tnt2Q8OEgKUkmK^m{xb1MSnCaw|Z%Q zr&6IOaDPNt=!t@Skf&##T|D)3f?2sqCWuhW9fZf|+sEdu;c1`P2)C<_nmW0AHYT)%ZNk=(0RK!6wU&Yh> zljWzY`1n5g9y0Y|;Fp_V;6A_FSiZFz)tphJPmYf~dZgR!wjkvLX(*Rl{)gPQTFsn2 zzF*h{U%H0JRNLaLE^OaFi$NwxE1KO%P7TgWWa#yd$YFJzjeg+`yciZo!fx&GN!48o zii#ngwYH$6jTKZbeOx&Tm~#c%-I8+Ty-T@D23GIwAa05RUd+cXwFNtXRHvdHYpU~& z#BGwEBf0j~1Yi10*2KNMzK_L;N72kU)AfTQm{Qx~QO58jSW!NQn?iF;t!TS_0YO~C zL(cPfdxal1tP<82@}ZA>%WA!|*JOG_VbBat7_=)_<#Zu<+yU*6@d%TC>uPp}UzT_} z+#x&G$ymNc(?sy8G+-N-IJ~k->-8WP^+RmK%>P2{%1kCpgu-Ru-v!SflF0`l zyK!2M`MM#~cl;bK7_esnJ*TQsT8QtgIeFl|*9C@fZ7f#DSipN2Wv(UUm|39OAV$Kf z#dzn8zt0<#9rN0P_Dp(dmQwgEY$kDDcE6mc6zSwLt?qVISvzU#=QVRJO3%Y+us&)t zOE@kw-(#8wm1o2VIuBHjdw|ExC@QuFnIA?^9u(XQ*u7mz{F`{Lt{4b?Udd#(Ose<0 zy7eGvraMOLN(>2+_*cj0Sm%OB#bpz&JbitHg|bA3nzma|p*{4t5_E!MmRABZWv^X* zHDUGD(f&+}{=~~rkMb7D%9odHU9B0SZBwew&1&NO^qp+sK*1CHaqLfcrhY91QIh+^*K^A$0AD8YW7v|q9)2hWjzfFLpI2kZid)XTci zrosYtlq<05#iBd4p=EOm$Ewk~4F%u(p3<5f?#&}U14z3MbNh-HQS41_cn8%9$53}{ zn)C0>EHiez+M)m8wo!UBEv7Hr;HAs(QLz>%dxdjsvsLpKi#%8u(nEp9on$(tx^0>7xq8uu zKYZJ4@S&<=CLawf;}DAZ_KJs7OO%nJUMP9m@&_Qq=dp1a{=9T@A z^?Ty{+T$2j18=gG(Vv=jc4`J&CpE4pdWr=8%h38cFU}V>#UEQL|D=CoCW=dzyly#ZaB}BB&r z{kQ1`m6rV#Wxu?aEcA1FSUNisB(>Ih^$p>jUYq?rYa(zr&-H|)+H+A0Ys{qSJ`^XQ zXY)1WVNJ@w0C|!SyGd_f~m*eXY>ftBs|fJMWd+o?~ImtKZLcXYvk%r8asB7cs!EJ zV6Q{fl%80oPc3zGs=srH8fTOn%-nSyIb6OwL^?7RqP9>jG_)+*>WlKWobLEC7@*aj z2frXhjUzhI`IrM5C=^gWdaAt`G(YZaPH@NkmCg7ilSIgPYN)r?sd7g#DwEVfW#*3Y z+4TO3?{DYeiQ8{ozhl{kERG4IhxKVn*mi9vvPMoP8R*yE`)m|pxDR$O+lI9LCp*%S zJqT5!k7Nk<7@8lQzB%LHy&4OmssB)xb7d@@cpYyl=0PWRqEQ77(XSJI|H-iVtWwtP zlvMj?mw`q)f8#wf6k)de#Vvs5WrY-X7Y$AsPz!!Iu})cz^3K@jsPXLqs2ZO?dgt<# zu&dk9PMptaJJX4YCB@@QZ`*tvTijYRwrcnNCb5*02zFGHJFtx;-U8=GGxNI*I~d@4 zqibFd@8{VI4yvzsKL!lc#DY>eg?D`eCI$#ClZM8!h-X=&M+X|=W&fd!7C7I;(x^R^ z|F-%?)4~v!-NP}_Ic{xZ8JWEeb0&}imE%iFzEO!x|(C1!&$ zJwT6*^{}UxP7S6Tq4^AH1iW+i+xVQeAuXPlrcOmb)H4NbPHsr1^d4yx)R9&0sFg}( zsJBA7Sf|yY92!47;nXLdQx-NK6D#SUfon3J=ddKJSWNJs)pqY59r2FxB+SO;adM5H z(R<;Q#|Jm?XkTJA3_9bCghMD7xzoz2|Q>ku+}e!h(|mXD|A4~Z*yHP48o;SJ#9ua29K&ie+zlOjICr=dxbaUf7`vf$-O6#wA88cOF+#P%Q1D5MBgb&MvX`1F7O|>kjid@f{z2UE2M3dUQ4H; z1(a^|#lTZIH=*J5kIDf>4g8y(4`IHK1U8(jvSXQGF9R+F*ceu?X9|g_Q*Pyq42;fQ zWVIN#KJ3L_O%UjNhGpBLH*Q1wI(VBx#e)e$A2Bl)yOaEvTLYT>q+m8u8j7qPxWvv1 zHo21mcfCNtN0DuII%Xr#?G(ImuKAW)HgG3=6W8S#M3a5`}dU^SG7yb3oL+6oM#!A7RdFrU1i(D{{UuE zWm-MsK+^XWen>_p+k&2sIEJ&`IN|xjSnr1o+*#JR?N+~tuD0Xd#r&nf6(Pa`bF5zf z0C{%TP0;c5odg(nP^b(o9bWMavS4w4DN|*-I(V2Rs_eDqP{MrOi5(sKP46%J{Xyp4 zVg_roZvG-rIj)>az8f#;5@5aosZBN~)Beulc7YcBHIGwoNm>eE*S*Y;G{nyV+1?GH z-plO@j&!<|Rm!YpQG;8ZMA6&!oZgD9@6h7WXIonL)SpmF`hQ&?~S!P3WhK z$8v2Fxj*E{ed!&-%vr?6uXmO`mrj&%snyqi#KXj_S-3H{8LlH|yjxQ-cab=$zet(c zj`E3%7~V5YOe=90sb;4C0N^9#UqlH^3hwuR)TQ|AMoC9*W6qN90fWQ18c}kV9oMgD zx|=u(_qeOL6SOpzYQ3WwS95ilaBsvP{CYJ=(P;DReseT7YJj-YtBlPXEodGd{{YmqCKxK_y}U#kUMO%c zjKd3E#!GA6mK~sNmFRS`0^t*S)0d*zYIJSp>{lN+>arph$3b;|CL{zDqPcHv{{YB< 
zHHM7;0NJIoIQ667af1u4{bn5$#RWOb)=|@aUZ7wBfUoDNRKeqF@!}i=j70^?{-%F; zA-=vJl(E#~r{&wC7@UP(8u*Tuzh-4EI8ssba~w4aW%KFxoshM#XYWwKyJ1rKeo;#+ zipGxcP)1w|^BHa}tnPG0bS9R(Dz*Mjk{%^J&R+S5$}O$6k0z#(u%o}T8(zEj{b9o^ zBdKfBWpSt)Ei^=qj(f|=!SVlG2CuKq4eeL; zFp+W28HjW(zm zFcBb9zA7p%Sno44FucX6fznvW$n=5M?mZ!7K6$<%a_2fe&=XuVIF+q+9e=5IvSb<@ zGnE@{)8FS6)}ZO0zbR*UyTH>7RO$2o0A)B^mZmoDSORqUMY&j7bkw1~GVs$adUC(a zEU7sjWy^fwEu(n2dOJ#Nxt_N-FR5Sb0e>;KcZrbG#wQU+8J8JA(>1M_mp$0n%Y>y5@-0u}{Lf;<+7DsSXsqAiwDR%pJgNLApyA01+#0F`^n;8MBI zt3=Z$We)N}g0J{9q3w0W69@~Y%(oi}v1+?7;Z{p#uLxLNH4FjhvtwC&s zi*xP7c@Dw*7d@q2x?4BR=Tj!?_bBJF3nJ`Ztvd9DOxxPKl89Ol#LwNz29zgbr}mYR z=>G_w`8`x7GNz@nvY zhUOO zN+b(Q9p``)8vg*lDDAg^rc~w2EoEFcGZ^8wHScvA)wBm*wb5r1nR=$TfN}sEwnv3v z)pCGyr0T%L0gPZ1h-*H~zKeD1-YBt~G@k7r8{yCS6)Kj_Zl_PI;8#?JJ`lAPb&8~% z>KX}acm2lPTSF^+%h^SmQEno%YE{>7^p=(5bfOt%Wn)2p#9fAuq^A{p8~2?i(_2JH zAxu0WSk_Gy6w#yi?-0h>y)xT0abLX0&|nFd!Zq3}D#+bszCM!?7E?ijv_@S=)4Zf} zkynTmYWooKJWW&mW-Ovmg0PO(pEczC&lUOf%?*Pl&C7HWZ^()up|$Vzm)R=q2T|wb zl^Q5448Yd_lv)AQc zg9gp?RhWsKv&X;UCW62Px^JH|8ao<&rZ{lb&PNIMi9;_GtMrW;;gdYo%A#1N}2)C*ia zrKZVWi2O&RvpG+>`!dciIJxJp3c17+eNU9DjU4V-XQwi*UAjzI8XJCw|>m;pf(#~_l5Dl7eZzC&{Xg^(MX!M)@M00!UJi_SS zq$?+39i%@GtMmB^JjWVWI8`M(&%%VrG^B!18p0TGkHG zcvxxk5iHZ53eo#mhzR9%OFG~QLC24tIk3>`fUG=@=Y&0CXIu7#S z&ka1+&JC<8>U5S{6O0$1`vD!IH$chI?Hi*`0AziM_%U6b6=`XI{x*(f%%6)z&$^#bQn9Gh}WRh!s zBljri)Bb;uFsF27R;-_Rv`cHPD;N9B;fDCHXzJd9-ZA0i=!|GQc*_XuJ~qlH8w#_V z@RT~(!dLJ2FP`%}%-1Vlx3r|8mu)=2W31Gr1K+<%MiTzfTLPQzzqr9Kww)(6janx# z2kO+c80ry}6(gq~64ZftVo6A2zoIgoLz-eIbNd1$D~7FIHXm_O_u!gm4V>km(o1{}?BZxc7v ztxKc@dOuuT)2;2yyUs0am5d-$p*z*dyU7)Upi#dOYZZ$64gHCv)o$<~-_8XQ&{)aW z%&y@jIu`zMHC3f~ddnylwH^AvdDXu&Xrl7W^4Qbu9g$lT?a+D##BBt=gBN0@3t|J|r;g{gP z3?jpfzhk5#ii}u%=1wIxF~eJzbh&y)CaYlIda?GDc2#aseBs_x4BIjF!d$uCEMg2m zkYSsvd0#!E5CC8acq^RQ_zn6SCe!x&F|&g-VJOqUKs#IUDRqyu>D;WfxLRM-aJ)uM zI=W18-X+6vT#eRZ*?4M+j}q>i7EO!q42{~o)xX@-HoCRpG|e%HBpYm6cuYzGZFA-g ztm}$Zkn7Jsd5V~}8DpDd`bW5-;tT~)VY%b@@hFlLEqcn2&xjb^!&{5->~Mt3+V)sIS z>fPHl(kyLyo{zvy{5(Zj;5F78Iooil)W)1H`Xm2LzlWKJO;i0061eC4#$a0xKcwe^w%#RU5ozWsD1^gVtb9C7@ zpS#*-Y-GW346C*#6^rdDq{7>oJi+9HuQ4AEO1Zq{q2}x5IIg+S-*z-->wRHIAu{s= zpdDCXEjl?y4wz1`#JQ(hsQ8UwVgV1K$L2lBS(nS2#7b07bDNE=1m;-H!LF#wjwd2~ zwLXLqY=+1b3mafr;6VlTpM;0Vir!bzD;Av{xzqB8PhyU0ZRM>zutA#us^3422AghEcjwxaffSU8F#-9X<|!g053WgHq`G&OKj73sTB0`?^YV7gmOdv87*0hJB*I zzE702MbhpHyyWu|I$_uP%nM#Z6!?_^%NH=qNVQ%|q#V;r-Ia(jXz$gEzWu*{F&(W0 zR$4aOZ_$%YT`CJsH+N&^9sClM+O(AZ8fwbIi(yyGq#LoFw6WNT`rDdSMNEmZiG1N~ZU+cGu>p{08RU zgK7Icq78IoM6&naR9;EwOD$C$q0qgN!_SDK4TcysrPf}4u=0VU;j`#Q4jqH3R=}nu zZ@e0|TZu__`oPx{l}tK?qW73o2y^Qz2DuQaf3!~LSrv1FslgCUpBj$BZPo5}qaM(8 z8#cjJ9Lsd!<;1p1vKqq^Xmy%ykXRYF@8VYw78P%GmCR5Ag}B#_@!?dT^uiY?()waM z$ND?|=3ZQ51#p{>4)@OA4ungA><=B|w)EMb#O;g+KGz+wc`?!#Kh)m0GGhIwb$#PW zuxaTv2W9AmC^yw({^Dgbjtv4LEmQ|OyZ!4ffQ8RX{=Ng#kB^T>;CfAx(T3Lt&TNz8 zP_}eMorMv&#(9Zvd@(OeaSDNgoG12$J(>hB=3&h3(`?LyU1ttputqxSZhJI7ap@__ z#c9oADfI<)mj=ZiP+C;AQDt8cxm4N16j=$}xiAr>Una%v>C~xJLbBS$^8V^o11%BC z`mr)>VKH5yONcnx1-7bxQnfL<3wnK}4I*^yDiAE=AJj|Y9pzS7?J&TdaZ%ab+zwHS zxsdM}g^+=F4f>mgpen-5zYZ!bTjT38ziFq!=4~5IP+#uTDnf-d%vs|T>Ud`IfbV?N z2)cIS2j`yXEy=9UZ0_iPWDO%_xEE*laICG49y1(NyKd863W!CG?cQW6rc_;C4N5?8 znrV#4Hf5Azw~5I8b&p8CUy?!Mvo2ayDymqTZv@4`oWWVmL(sB->lZ&-m!qGjqJV*B zpw)H5qF-UGxU1}aqR(Y%&Fk7>x)oP7%}Y2q`Ho%)uID&x!ZnVO8`v|U{>j3aEMsvn z43*|QSD(C!gPir58FayZ^MJSp9yL+OscgP|2&26DF^r60;@_Vzvw7(2ce$rA46t8s z4vzIag|i4z2cpot_271k8DlK*7h99_x)jRv@h0qnoH# z+FSzkVF<_c?iC$YP^Ct8uyrql1IH5OTX327jtcK8SJE(f1tb^b7s}$(v3slG9|q#P z#aHhUOcpWHU>iIcnJ72M4`Wib5IA6Kqb%bby3W#v6!S1Ohi=F=C!?sCc8JoCb_il# 
zwTGEw%MLQv)?LzzuJctvh%}j}T$8&Q^tihaMC6>yo=~N`YTFl*;qCA{z)!3J6kUPE z=4_j$sYeSxY1M*ips;LKFYO*Er-Ktl7ijIgdfeN*wK+PYs#C-(Azx|BBSX`3Y z`)e}BwRhNJ$z>GiBzlN_I`o`H+8N}t>oYmJ)K`+myhOla=AijC7gRe ztIHev%oTa7r&-j&eJ(Kt&&f1f1HL8!RAvw^RdJUxzVx^Qaa%D(^@lSMc(3h#zIQ6L z;MBJjWnDU}-*|^PN(cC60Hzw~b-BRaud<@bUR|W&5SeskCyjPl0ar%$<__(dhnMa} zQ7nVI55#PMZA<#I6+n6a0Lgci*lhH~UNKx3ZR@GV1>T%J}}~)X4Tfz9yP4 zsi<-cuKPcJ(MzgZd#^RP)ZA9E{ZQSwR(!^oAZC51zQ@GI#q;OMNwV<05F)_j%A$v zqqqFJL9F;-u7mVoZAQE5GLooKG5n`#iKKVT`l-YNEaKoT7vdEGmmboiQPLLhcM-@s zH7hFXutSR4F?cfa5MIkt$@HQZB$p;g~St2z-|YK5~c`bQB#4PMG#i!WFn7 z99%sM{7c(?p($gurOh5?P0)2}7f`F`iIVJEZj26n9;F zz%_nG9F4j%qggpP?uZO?T8XtOk5y4|Im<}+Oz^LH(CaO8-@H^>CT759K~pEzZYOi9 zajU=S8=NnsUAH`$WH34NF5{@D3bXk_2T_^88gn%rqcsZN_;~w77KvO8OX6gSMml4r zWhG&pfzNg}E_16_o{*{-bj2_(02JAeq(V8h_qUzPrk);yp*;Yqa&Tsfcs4bcv z18`dvnlJco*_mHSYnnPH~gBh!=ex4fgFVD${g)qwk9;Eh&Mcte_V3+^}w*Y?;bhyj}Cj z0A=-M$I<}khF;!)g|xACH5_@=K!<~v;}G6jVbSJ@?f&7;m$%IL1A}(`nTu$m*ULXS zeHxx`+6Cg`tTK<$kNk}RbvDqd z^UMZ?yLne{_Z;nfjBGb%?;C|!a(Zl?3!^*NYT`c?bbRH*YOBOFG&8!?EtIaW9$#5} z(Ye5HzY_&(;}p#*seX8wGUA^#7VejO+W=z^wxf(3EySwr-#eK@O-Cesn3zGa%xoLT z9z0A9IMy)lbr8ddu~{DGV{Vf+HZ`j00=XOR^q(Z$wKdbd}?mr|rP& zE|zM)dfZ=A9YFSf*=S3O#g~tl_b_y%9lzwTvcXdc){bSm7Y>kd@75GYGVqHrok6s$ zutFT;sVqZ=tQV}&C?G6eZR_c9KsMlqLI%6O5M2w-BR50J*_e=GJ>d@t7+KZ%nRuH{ zG(<$?`4I{#^Ys4!lb?jfw-B;hM_4c;GmV@~0;@kbvEkMf0CY7l>vy_}z7B3!&^Ce4 zShbn?{{Y!=R^>)F5Ohh(<9mp7pa^rs%Il6Lfq1pPAj#9j-HWlZ>LtC+GOX5RH+o~d z;|$D}B}^nM)oIMd3_=xZ&gxyk8PLb(8L+M8g1SVL09Q2t2+-iI%sDfnJ{k7XE{lXr zEHu&n#bs-t+bF|D2WjpqFm;I%!FDB3l%F`%Ji~BKaJqKKL zj4Ojs5#5kE+n~hgqs<05g3JNYR!l=&bempcVcN_}+|s-0P-$(pB`vxxto!0Kb2P7q z9i}%0gkAEyp-QdFza$J^AJkXj&s;)Uerx)VBVsk*7soQUTg!>I3K^zTT2iKAZIc~U zTe3G}v>uG~mL--?Dq)9i&L&!OFOfX?kDM}~>m15+zGLj2HSyXaOXz;da93y)VH_`zGT{;>oR3+ zx??jeHLPs*_<3YL(+NSyXp;j8kZA!;-#7h1JO=P&h!`1tjKo>aHIHbTZdQE%0FkB4 ztGJpA~yKoh6~;76%_XDN_TzcG5+Ic=$9+hK&W1>I-faK1DtMt z3>#-OKr`c-ilkI@<{!{(wER`VT?w4s3>`KDIIx5`rg078!Q z6N<*8>D?&0Ohv{f(PiJaS-IRUrU;D+WhG8^eNOobN>MGCoiu)|}Zf%2!L~S$4PE5*D=jnOs-AaD(2JQF~4>pgr8i zfj5f&;Ju=!SUPm^GZ-_@;~prpOt*;8Ku(9+Cfd$taC_X@z}(J%bVL1}yAiI{ORj#g zV=frDQybj5=rh`&sw+S8_r^YYJfH z%p2^1lk%K`*k4Y6S#Ve>(gs_`a2RF@S=zA;w%xZxqPi6bYzs9G{TW>U0AH2@DP703 zq-DUPf(Z&LJU)5cco!2lZJzPXJKlMhCsVZ09Z92kJ8E^A`NX^-e4_HZ?+%CLOQ+Ng zVq>O-w}W~Hc=1pv)qg6o|LF?fd{yqgN@EN&aM{00eTHNSKB`2`$MIUZ<$_e z;!weN-c|M*iM_ek6Pfdvr+-9lBNd%`!&C%WdLkB-Ifg|A-(Q?XpE7mXr#@o51#MeB z-jc+DW-Yq*;-*luczJ8-5iA_CT*}i8^!#NG@9f;pY^H}hHxvSe+I{i+QzEct{)|Eb zqelSoaj3hcQ+_`+Fc?(K7U=ewUW48kmQwk7mtR_ zw@CEnR=`+nvk(d##n0vd5nGz2{-Zx7TDSKTE}&#}%(gA40`&+jjV%Qh_9>{}Qu?A0 zvrjF|*lR~qi1z;gEcW)6=MyV*my%WEYp9LK25W&(xwHy1|#08wVB?P+k>uf*X|EZ0t-G6NR+vmd>tF zG!r{^HBeC9m3q`%fGw>>-LhSsJ7WTsomPIZnzEdl{6`K0#wSDcE^nR-g|qWr+|?|% zsVba2)GM5XS(J&Zl)l>?HT^}FRp6+?inlQ=DZPx@8DzPD3_UT=I5BsQp$UN{b4m@W2q{3DZ`~|4nfxrTCdE`-1&<9N0Zcn&VX(U zCWtOnYmZhtK<@MV{!F_=?$&9wH5nadORCr$Zg)IyYj@ zOc)Tr5WMpDmyPK_bn0Ao4iv7AO*_4yDLo7B-@I{`E!&8OjJi|5tmmfgBeiS}u59(~ zH_O@<@nx&}`Y^*~)Yzj%Vz`(>w@Xc%uA74v-F0!8_&C<1X@Z{e@0^bCZFTjQt;|V7 z^?%IANz`s=Q2k-r*hZJH#9WnzhiQUUUA&!O4yhAX7lDrw<*Ub7s0+`Fl`WRdim|ZFB|;sWaJ^d1O$hV=jdJd>6gy(ob9vMSf>~1s=*po`0O97? 
z{GnZo2lqG$(ELAl*`Y%=4D)TmSQT5 zW^;b3Slyb$%FWv@qWT5G;?0IlA8BoDo#pnpUe6o%_ue&b*0<(f&T>+HBV<|L<|~(3 zfoii7{{VakwL4rcml8apTdPsfaSu8xxlsj2J!Y^Opdo~))S~R!kGu{vycgxnR@m9(PLqn?hl?q8jO1^37R9AVF(cr<_HthR*PU4!SCj;C+H8{hkUegMW zTt#NP%>1aY-eV0>uEJ?PNr0@*`j|_Map*AFVP^Lj6oHy(b&H-*aYv77^&YOzf5gpl zU$-CBBLeH!5JPQczX5CXRMPoBYG<=O5=o z8pG8QU|!mPLL?n`ap?yv6;R6c6V6TMayaL#ZuE7$)kFzwcbjFVf60F%gwa*@l<{6= z9D(wd?i?!>dj4P}cK{y3P*9E=M#dlOj=8I5-PzHz1KSUCM^ zem0m6*A;^dF}H_4^CgeLzrO^?mz(6H{f(h~;+ZpyG_xfx`{ntQMthd~O8})f{6^_V z6U1}sx!qsiQw?n+^B*&hQ^&Hjfx~t9_hKKQyCH|4X`A{hq@d8ZlpL`f!n*i_97lBSMsoHJ`Mlb^NP{7Ohd% z5*u9uwzgs?IbRayuwCPrud}4La=E6?*p@X8SBbJEI@Pfa-?7*A7N{ihAYL91GezwM z3$?lWuN$2kRJ&xnSD9sODAMiX1?)5^5^tvAMw(>IbyuhRg%e?$8quDyxjAC`N=pvP z`{K3Vbno#ky`6BVs+{#|y2aR6SDwTLDiw76=KP;IbVGB6BH^IAir@(K1@xwYgzsA04z_>iPk-?pzN*OC{wQT-3|1|7(EW0 z6`sY#bW)e2!JJ`c{)1fV-e1?Oy;RIIZ(i?cAt+YfU4y*Zgm4ec;^N%oT6X)qW;rdS zGM}$5BE$orG?gAW{{V6_55Nn%2X=9Km$BQ-8UZg@KIc%ugQrGv;3> zNO8vk_U z7I@-N)h>7)R{YE}b3$8Q{{Sq*3s#_~F8oSTsOZ_QJ)?G?3rpWqJl<@L4pzP7=k18+=o27V$_5$kN>pyyUvBrmtSJBx4R+IO*UTx7 zx@!771Sig4oxaoSz<%9nyy7%k;nI1nOyJ^@yXcv~)vR^fnUc`kNBvHA)HU^BQjQu6 z{{UWKC}MSse?}r{$nW(2;IVDD6_E2TZI92dSb+XMwFP*nt9tHYyO-Q<10F4|oi4+{i$|?y<~uuk!YxMTQN?Xg={%mbCCTXa9S z+6z{D=AtIdm^1G#FPwkavZ9pVJ>^!oV?ARKDvrIQpep(oQL>$v51-;ZRhd=OEpv&l zINqO#F!InF>3zR2P|`Ho59#?-H5IF(UOvzmE{-|R+B(NJ7FkW0#-M`N?wT2!)GEg? z7PT>NGp4)!zOt{)MlF;uv2j>Kb3^A5DKkF)g-4e0jllOs+zq`C6H3k7*NWr(l!?kW z)q43wZ>szo`cChz_3!9HYdZ&>{{T=K4_Ew@8XLgDQL}U$&9bHHOe2iW?)=068nbOm zoK<_P-fa#{@3;M!oqQ7BNiSYsAKX+l1_f+i6p5k z=>#yfU|Ydi>jm;Lz4JDnLHxbtiKgd|xfC)Q(-OHWkk~@K+rq2fW?@ewI-Ii^&V6s9 zBw>|=Gp$B4)CA7G;lUmhJ_DpV!U2tb`Huhw zGIH|#ssm0MjH$(U%Pc}eg{ywPlQ7zzXU;syc*g#dQ02WoQKB9XKeaPP1Fzu=P@$G)G=#m49!!Aomxde#H67g8xhgR}upA&z1%HOn~ZE*3+gU)PDf@GaGQ zwJb%{bVkm5R^z0 z7&V)1t#c~2@2O_D>(&cwUM+wwkj3!l=LQf@5rnZo=;BgyF2R;;YqIlGb*r%CI=~l8 zJjWPT9!=D(SUD_3fV<8i795Xk+z&$CAHC&_h8k?SYpd&7W#1!w+{gynRdX)(K8$PD z9k&44X0wT(7FKy4(D!v{&2n^A{UDPjOUtGguqbtO@z;oos-csge#ww(^Z6Zy{iOrQ zd>ZzNyP>vS&uK(J7yMK4f##)Oxsq?`sUP(EI9U;U4F1;g>`)qI~UY*`a<2> zC0DeevAgaapm;50F)09xF}31sg&9&?2p27CBpY7xgJ5aJp7OrM$-kisOO?6}P9vb0 zsBo#d4Z2EoCeD5Vq^6q>Fj>;t2h7Syn-od8bX>?yL+?z#7mnw}K6 zceuMAt_6joXhrpljjS$@+wvHiFF0@6WtQTds8mZzJN>V zU%n;09r+{OFD^oUVyYM5kD;9uRxSBv)(bVo<#Q(t%?=Fm6GVvGMbLu%_jvx!XPy<|Je)85qs_OUs`9&RK>lYQTRrPCn zre%l9a>1~KRhVOg`?g@ZYa(CmOs)Z?N7f2jEswMp<`X+no)VbVHGSpUd6ga#>6*QJ zLN8~y)GTPGgKcBlJDQB15~2heRm*8&^eUxRQ&q9Y_Jma3r>w%$F;-SJxRuCX64o=X zX_A}U#J1yAZ0Q-M9{ot;l`(=*i*Jp-b0`&j_=1*nF17TAn-t5-={a#Q<*9W}@~8co zRLb`%xakVcNTy(vgl4(@{{SJABW2$Hr6!`7`%}NnxpAY_#!nwvii$gKa$q+%eUk&( zaWa8W&@%prAPgZ&HD7PMGc`&_)DLaLNG=^;Mf~+Irz$#KuHQejB_b$aO5D46ixp0D z(~mbTaS$AmU$9G$I*jZ&@76U{SnA^X8T(4m9SpwGnoy_fFQv(1-mb%vB_OK12Ufps zR@hPZPuzN&jt{IViz)@2MCFt#0!6Ckv3**Z`-e1Sl&Ddszj;OCmCflmaAM2naA64B zv_UovG~#4lk(x&usk(CtkfG1Cvt%A6j)S2ASA1biJkTog_?7ozdFeJas>{kQqb3DG z?FYXDQ*I5>M5bFN*`!yx;e#B3i05j0Bn25fqR2$Q>`Vnq-0U1>L@jmNABc!r6z9P( zBltkF?dTQr7I@bxeZ{`gkb{aVJ|n-iWy%vXr;&`79Ueb>M9Dp>5y?+5YB8!hoO@3# zXSaFXx(!?M?FuDyJK&DW=qeMH7l+`C2W>4!Y;5o7Zlw0~mC79^jrygL?Arxm7p_$v zfYO_>n|ng5TW(X)j^JVmX}>v2#Lj?O{{RT7!iRS>2AQC>eWuGJ4i&^ojsPE+q+8%e z-s4FH@;_hu0J=Kh4lC&|d*@mCy5*4_K}$N;)*+zceM^F*mE^&v#Xev1ZQMWGCkwk254|SrgjepPp{5m zS43AB&k?3IY~GW>2ROX)(K&jp7S-l9hzF32$+oL_sAdDKYws{}`NmYavjV_`JM#>* zD;x+J+H(H@B4!;TfoZfBhE%JKiuv!_Ulv0(4``M<{-vM*`5gPe$4KN%lpS}L6FH3v zpvCbw1xy#Ur|6j39a@i-*VPc7U??HhK%jGX)4=f%30Po5+2G8H8*; zPts=8uSR%09pX~}K63m+jy`aJH1y7Ye-KPIh7Fbuq+84QsFJ zV7$=cA8>2LK(*pF8G_qu%g5dgvjkih&ySpRBWiw=piY<^hpomq16awP^E7bAUAW}c zL1xy^yrIa>;I}yyh@dQF;8a*M2K3k3bj{72_mtw^)j(cwua&Qu(nC`I^-Z 
zxXT8(U2L5ueN?8H=b`0`bprt1{c)~7bBSWvtLwxN^;0XCaHb1J7XwTbmLGBRfs{FR zC9xKd0mqhLMy#65t0uKu>}DIus-m^e?jpJldA;~0qa&_4oewI$@HU(R_!-aj94PgF zyv8gR=zrAGb5QM-^!*~@$~pbSamy%5dHaZpu&sIBA8U?~G63T`s7Y%SEnZi}#aU4~ zwUzC-^=eI_tmAF>fQ{VLYcKGZHBYYo3rQW4EE>$*6HY;oujdmcf-Ou#KnUqig4DaacO{{SJ~8HHCoeB~Ks zyzj-c2wLS)#^bvu56&P#OYW(k7GS;}dPJcwfqzt}72Fn}J?$;Pw@G$nZz>6+spbg+ zR*=JgKM*p3TV?y+a#I~Qd)qHU`vCgEXcOv2*gJ7P>1R3Peo?OWG!f`B9g9@o-S^>Mitj&S<+^W)@wA`S0aacj%9;wIP zcd3y_&TIv*OK{6|a&J1+peda!{KwZwdgsJ?HXQFgV^k@+s=WF6$}MemA1Frcp@6sN z1jernMv4{Y#j^x$ImzB6bZV%qX(f6}ys16HbM&nXmdc1WUF!4J0?ICsId9CoO`t}D zr|x{_6JE`R`%27@Lm2B5C|99rp3xa2n%-tWT9r6)s8iqoV4hK%)v z(-S5E*148|eK6z1uQqtleClDf131^t0m@rCN(-tS-h150+-DC&3DlgI6{)~`HO7ZQ=#YU2ZD3QI_a5W@56tk zO1DOvrD@mo03benlkF*!luKVKnRI#%PV(5BmRH~Vj;j;Ybglcs?&lmo2bkdTtsfbN z9UCI_4NEuS26yip`x6C~$;a9`0ymYPls}cEdFBizc1uHe^;q|oo0K;BeBgQGz7wka zZxHcY6wN9(>NH^aRHo3sc?)8URJUw#rTwW|s?n_Z%m&HUDuG?#ZEMHr`|mZKC1bl% z%7>Vir1G5r;&xxw3tF6iXj-g^(GA7pte|^}beCLEA!CBG!|OEwy=a?h?4A3EQVp$) za{#?68VC<1#MxJs=Q77uCd)JF`*C8+Jk-5n*w&aCXbsz#ajjENPcg+u3)5fJ6Ugb^ zxpjzM4aK>CINhvxC|w+7 zV}UVCQ5s+o-KwV1n#VtxP+_KOm8P1xtK4eMp{)Drpx)rgeX|jfVibR*>;-mS_#mU$ zWUhL?VRELsYKta`Vq|ij{{Xi#g)zHnXAS!T`sd6xLgu(@rxMhtDdt`-ikOyASnu>9 znyjkJO=bmDoOaAr!y^gtlaJ~WXcK4e_l;8^YdC9s{{WJoR`Xl&F%A@0e-JKJX|85c zj3XB9toM}O=?^y@Z&g>x^OR$lhdrWJm4N6n{a`3YmQ!t8i8?ZMrenA$Y4KNoNy}Ld z1Iq>xWJdJU?d=h05WvA#eceW|Z370f`KC2>iGxMonSaW|AY-a%;C8uI;Q-=^#ujS~ zL>7)1?rJ?)y6817DO@PdJNSq#kz%~S%#JUI+Ea_AWVq8;$UHaSk}0KDE^F&4)d1yQ z+(OY{t(^7<^8{S2GX4JL&f*oB&-bZjmbfDx-A>YmECjSS)V@B_#h3zEwySuz>_Vbo zvri#Ul)`XUE=B2esbr?M8*S#gRC0GBu}ZJr(aX$O^jV$>N&?}f9dptq52Z#%1?Oz& z+10D)IzWmGi-X<{btz%Ysxw`;%*osqRZgRi(iS4S7z)F#^T59-Ox}g;d}VmU)4VEpF@HE-SvI5S@pk!+Rs347DhB zHekNrCK1R(1yN-m61sF2i}aK&EGEqK%;071!c+Cg8Qh}LhJcM)YnEPFmi;B`Z1FPeozv|W z5LKc70H0}tky_0;ju-7p(6NiIWyEi^&BVe+^^5yUaa~vGD=7!Q4M3*%Jz?HW^DpNH z%Mb;>MT?Fg)lgjo_p7)*(Zu(Q-R>+l3gs)O{pkg!0=ZHiAol5!ApG}^)xmPzfx<7# z7+>lbDbEn0P_8ChI=Gdjx0=#n=Ou_JR+XDP*O^-anF&5@>YGQc_WjprMy(f>+xuFW zk*27OV*dcSj>lephgho)SvOp7nX=zYA1aAQUvT5E1E115UrS@d75@NW)(MxyW*g|Z zFT?bfsbaN_c(2^WsUgWy{{YV3V|8d!WrulNmhKw=a|X6QFT|i}5clN*x#J z2L>E2iJi%1byO?|Mf8@r8_x_Ov1#&xs?BA#{>7mhUviCqb3^DWF7fUDN+R3FX@&iz@2{B8TCH3kVe)}l7HrO^ z*}m`r-SgAtOGQ=7itA7V2E7ZuFSN=vvcx^z;-v*}=(gL=FY6j|nL$g>Y(DoWFSC>- z^sbwSi-MPjUN!WV>S+Bmyt4R!DkGj2V-*Q-&kif)H5{v_{3E;wZm*+=?)*_Vm&mDh zfZbQoGo~m_{GjK>Z=}E28rG)!Zx;%i2s4k{3JJ6>fIRlc)(fKvqZIgE_S{!Hyt@MG zimJ(oldda7eHW{!>`}Q}_WEJmR(y-x{T?7qvE&b|T3uTiUQ@YJ!Pk?1c+=Jj?uc(2|_DbAIuO4(y8vFhGYmV{l7_D zqfO*{YEr))Gd8@;WrJAZZ zYGYE$G5y1hfT)?|Rjm)oXubDhQClwa7onY#AOp(5Sr>+~VCe(BeKi_)22i+h!FJ8w zrz=>k6EC^RxoQ^j!n#uJ<|tDVlZ~zNe7aEaiaYnw-H#B>{d5u8Z0GK92 zbB+G_nXAFpLG3M8s`R>|1%pLBri({Ejvyrfr4EAlhRFPkR?WnN69PPe8C^Heym$wf zuJe9=v8}V420C|w@D06tt;~4v!q*)~`-PzBUK(6|ezB{}?V;s@TWiqwz9P$D7<5+o zh_+!Y!?o~5a^2bhudd~QG1bm{KGAq(3;O-y0*C6|N?okJWvpIHW>9WlrsG9R56N4C zuvlW|#drN;apkG60nc|>cFo_XXpz~)vY=WA8hXbNK)1kZZhirb$=9^@ z!>78dNuMyGVOGU<@Ygc8s6WYz)Ul@uyDN;AkJ`F0rk%!4nINl0&ye}l7|pt~H|^SD znl zR8+Q{h$zulyQZRXe62jD$!ZUZms+rSl@LERGUW{h?kKk@i+2HTvaw|m2A6!StuZ7ltgxr|?FE^E&i2M)AXaHB!z3y_*|q&Q^@w$0&o|x% z`wo`gVhjvQ%^}1g&7R{G6u~H}EA6PQ6l{-g!pk9Q2-n#E09WqESxoj{md?@Vb!55W zii)A-t~|@!gKS}31OSo(w|?~!<^f?1dybX!KmmT}rw9hE!mL-;BGuSc>)tq~!6n1u zZj}Wbe6#&SKYl?Y&VN{;R<7e7^)s`}p(>u=y<*=;v~qt^mm>U~I!jjyF*Tpe&wcHr zu77$)=v~@}@6KJbO{+&#-}Nw+yq)@0!y}Ugy6}5JU=5Y2tAYSkR~UVFUnmkRItyRF zyaJ6PIA|)?9fIc6DR+pmo6OE=tE3f?9`=XApE8%-Xyyn>2i#h_4wwDGl zc6c^r8iMS*Zu8Wr%pzv7Wv?-$9f~7w%8L9z$ci$N3;AZ2?-pL`iu&AAxo#@Ab)Fz& z);Z5TwNSY`lf`G*=_raRPR+l(EO$$1v1<8s(o-@mpfgX8oO^&?Yih|&d&4HyMI%jK 
zxZgJ#vUUkYnJL-KK5BlT+~FW`0b4tDg`(3^kKyn0nd)~=b~JdlSjpD{!;!tGUd+ax z;V)caiz}Y9uWvk&H|TUO*bmg!8|6=EiC_pThYj!bncJXPs(IDi;Emz6$JPvYa~t39 z+GYUp9>dRH?igA=&KZnQRv+Gx2-Q0W%?J>>yf2wc5C*f9ypTgMO}0JXNlq2!Dpe~=tB#E?D%DJLK;jY`dM3v5^kBST2Q=Fo+g>8Z6`(_0ccXM_yftDq<^JJ> z^CfNp+NrX>W&NVOc2hjC41Q`Ky7}r z`<%1OMmvPLS`yCx06Cy2aoyvXgRMvHa^pOuR#(Z$ec_rt|M+q?wCZy ze-kF%MjBRduStMUAbX{%K}ZkQRZRVef*nNKif?mhKQk^}b%rG;Y|$2XyO)EQ5Oy@J zyIB59hQAR6CP$FgWsw%jx$eU86^lT}FWbmLdU(`k>-WkQ8RvdzlLEFdX z6%`C>v=I*+M%En;cNSuWmxbL!IxggBd&(Z-$)}1uPC&>S_MSfQ!tQ z`)P?azJO>PzU;tQajtT^d(cbPfeSqAU%8bL2+%Q`fSSNP4u{7*;&VnZv^4!8R&WDX z(Cy|>(N;r#OY6^k^qmwTqoDV%sWUzEIOOP;;-|E~Hz-y?TizY(_j56_-Nyd_ZhdjD zXVwG-Koqbyv+zL52OAdeZ2R&HaM z`mJ(!&*l?FFxK%OP+6+Xu%}Y%h_!YmmW%T#OT(Q?G8MYaMyNB(5QKM?EN+X%M1!FE z$1mw_WVO&@Qvs9ZDsN6|{*lbevV`0{9zIgy+H^B6(;Pv4FtD2WhhOR*IyeLj3Fsem zy(8ZCp@ojkqp2)&js0NSHzFhR)TK!Nnt+wIraDog!UD21aewSv7!_2kOI0OQ8nin^ zDXmvB%S_%eU*rXz+x!X8dkf3cOekQ>3hqoWXUclW`^^ z+B_dF^LBhf;*PJpqJHh*sLuPSuw^@S3YzwrfpjC%tf^vF+l^jGPZmq+*_lul{gK)* z0+?4f&-DV&*H?2gl+fSKzI70HgvYXJ04iyp-?Xfp2D31bq4Y=6yhnbWp%ntI5TqPQzPWuqB1dwH443IU~mT*aVU(#E}eN-qb&Nrm5Y zQ48_c02r?LiNU}nIMSMb?8s{JHOT(6JQwNt(fhnZaN}ZNMs>&gE?9C-r|m0iWi$Kc zbO_7EbefI;4hop6QX@|*{pK!K7MP01v_VWm)YdwaGzpA`%Jz#0yu3FMT6F5J6YF!O z)>YT;CD!WCBbvrHnQA+LRr^Njo<@$95sa^^96>&bXf)vQIu@12=CiJ|eq~JedJ5ap zQ3r46Ys97y{U}uEB?*WRrcML<+77%nqeFl$stPs2;0RE*OXFG0$lFB{AFrTD(xwuclqc zN;jdSzLN1&;{9bcSJL1hS;_uDwC)Trv;p!0$GIIf$yV@+kiRFu_0= zz@fj+I?6^>kzc>uy|l?PA~xj(Ld; zy$m^Sx<&ik>^{1kh}Ht(e8!n?#L7m_na$XK^@i$&w;zvbMWvZ-x=!pjD!#$SCAuPC z^)0aSO>YSl3s}cav3jj`H7vuKk{Wb(`-o^$Q^eat7er~kL;Q_7HpTnDx`u(Vz;yP9 zcb`AeD0a$lLh{1P-l6$Xy#xCv0mY$8zsO0mm2cnPS&F!14$&~?oON7YT$W_2#K@?k z+Wbvk0-f{zN>gI{@@Roze)D{0Bz9Iq%X67JI$O<@_6#PE|HVM}-?PLnWOF2%F{ zMJ`o4u&r1Oot-8!@G{>r$H7C&{@I)25xkrepmk0-=2WA!&tq&xO3n^?5I4oOcDaMD zrA;q>@CgP_4ZWtQI1|?#&T3q>I6FF3%u}1Qe02QBjTLIR#0dA9Z*>v`(4>V!*uNDV z9tnW6&9XNbs0y4fiQ0myk5_X7{lUgO^Y(`>nk-&(p7Mn^!CQ+jtMZuR=9Sb&&Pwkb z7EH2gbG0ofvD3xo98eqRJvz#}0;5Xx$Mr3`-n&&^H7*)h;+({6wTyDim#J!V2U?kc zyVzqH(d`TXH+)PDd7I2?3|&dsxVJMh2sqRTTy%_n+z<&@kU-888FMjmuO<3H*;BCG zucjN>W|-d@r;Oqd<@2}vfT}vc_){?#^fLVCF-$#RqCe-DS)a`+eWL#OyMFN_OlJgD z9cW>TV4&j0G2zPDm%=Gcwo9S4^UsM?5lXRZrhlki!nr#duj(w#f;ah%+nne($2S(6 zD9asB>Bnd=wA)urbI;CD1wq5K-Y9}cN%m)k;*g_3SXy}3KlbH@1mK!-`*fFFJsz&P zrd&L-6_?$&T9%2+S_I;{PAJ_Or?K7@FV@xFHN3Dc#Y-Pyoj_~(gqA^)lrOGXr?=8c; z!gSsJA;s}B4H~axbxT_)^9;*JCH2e=g$t>PExzR0GZ)ZuPOy3I3iAr23+es2k(?pz zpYAgZla~;wtBN(5g2BjpMeLJts;XA~F$g@}NLV@l02p11V&PDx#AiyI7Frr2laVqp z#8=rk?=Toig( z2Y)$XG@OU(#fQqmC|S$eDu@f91+Vv{Mzm}LyucTxTC&B@s$Q;E4N9_F?0ulM$_hUT zN1W4|)5Yd!cSl1l{oy^w6Z&_6ECD^eJ>?uq_GMJbgvG=sF_mho$kfla#D_g*Q-!L{ zR1vgpzH z{rf=?YZxne@gE^?Jo^VRKuLo@?=Ow;1bfQ$cfac{>hb|mCdjidg>1x92MOfQ~|4K_n5|_U@;uEekCWoBdcC`cP(}* z7WA<}O2YeZ*O)krVcStp^;;qMV#yOQ=q_ULr$D-%K@h-H2 z#m?opM$1dC$nVe)b1;j(uCr9i8^3AD*5-oo99Pgr4K3ycmn#)7NDh;+po^oPA*YdI z>T;QSb&FnwvAOaTEW3S)?qJyJiqOknNBxWd!+yr+ka#6xopPC2<|aBW8aqH0qM9+x zt;<>BDYWIf7sg@>h6k75A83hEu@9v^r_OsU%ZRqJd5(4HbH7EEtqV%SFa;Oeh)K+J z`y$u4CROSC%vvlOaPRl;6>8f8pd**GpsDfl<^qDn(fy(RTp_Rkf}0?;94| ze)lEtRyN{R(=a+;E8bLrSl~QI7RPP>0CM1m*HCCx|CrNoGlVw~luP9`Lu8@m|lY#Flc$SLd(V4@~+x`_yDQBQ5h+ z-z;mt*4h66Gf}||bmjf#24Jzhy7_Cg1=gnWh5Bdxij;CLrSb1Dz;v@%v-ahZ)6??( z>ke%VG80bt`N}Au=<@yV5ce8QKSz(usN}T=UdNfDBOt#J4IMygxcO!ch&5Gv`$A2g z#mvcW-u-3U`Y}DXh!mU-NLr$FPJJsE(pf`=_JGB=n(F|P?)Z;ZcN46!%G?jYHpYzI z7tmD@JwU@Vos2$^I(dK{VXo4d_QD+?xu1ahAgnsfS9_JZgHKBg4OPbFRSh#V z9TMKra)qg(P_u6Hvv)YJ1ik}bd5E}OABd&Kj+}Ly$7_MK)%s>-j<+j0(<%eA$t}|c z@=H%x{{Y1oD{|n54u%g(^zkb1=NlQ~UrlwF;S?yNEgWVKVQXC#-YZhuYXSRHHbZ0G 
z*GR1**f%h(+x?j~(gP{zq2qfU;9nSxw3E>OkwLudt{&AiD|ehzuA7x%R_@R8ESN;R zYX1PZ%m*~zdF+2v$++8G9Sr!64el5I@ht$l3m$kS0eus@0=%agzoah}b5OLPPFxS> zBsOD(e{(5;iou;FTRI$Lo0{vu*fK+{C*khB~}> zl|qWAI_vrQMVh2@(>;0@yhj3#NwMRn{2&_@&;aADXqOu~4(-=p-xH;~aO?|!UrNBI zU;D(St4&(z(3Jz2wSJG;QrUfh-r#oo|U` zEN<^<&+mCbi@W#Cu(?L6WA_uiy!4!@m@HQNz)6KB`_CiL;ab7k7ZS0k!C{YNI1H07 zM+Vp{(QC>G?r{!14@qMLYGFD{yQ$SW*5gJ+FC-#M$%0rk_BKq7og#Da2NB4#Cy_ic z0*mLE>bw%pxR*K(m0byCtxu&EuYmnvwjOL09=Mbr^&>eW(+S|YMxY=AbO?-)ki*vK$ zqr9fmf5dvin_lH=1vFmd2i&L3APGwWmAk(d)}oxGtiLcqG}9hqZ>Uz6wB3)?EMN)Avu{o=@N$}(>K>%U2a zcG^5D@TF$&wt6m6Q9r*L!QfamJR;qZjXY=N5$&wb< zeqTP&&7qrd*`UUY)a5%hQ!Reu+9n&rBCD5JETF2+!*?DaF(PbB%ZVRLsEoq7A9wTP_+$p4Zz57e-fPZa4?i%#1T`t`-nL=`C&L)Ge-5y$M zTlvR1^x8OgWl|ktsP0#x{Y9i^)G$hqqrX;Qvr}adP6MQ1-7ms=@#w3>V^l2yH66a^Lt9I8X!fpogX$4C9(cX0^Z?>PVvZ9^U8L;p3i2zVFYWW>moEk)0^8Ic_Ue(b6TVS(OV!n}q3knV^Gs$W?`8>NOjuT4a-gOQSS z_k`6=uW?TI@{|sMn(2s6FzaRd>jGf7Id~;Z3rl`q)Un+)MzXPjfKVFZwN}M>$6-Cs z>Tia$7#_m*OSRT70ql@jAmqULOnWfD370qxUy@Q+N=}G{vS};lDjswR?o0Y^pR!sg zZ==e%CTW)fnF`zfYI7YgKfsPt)x&Os)mK{{X!u1Ej)JtL8Nzm zB};a4{{VW+v{OYjL$BRME?$BC`#=)ca*s)QLM?}{=3^yET-UVWJ1du1;In1;f~Ggk zLBW7@vwmhFyCWT=D09PpKWYM^&LRT0mr{Ui1KrF!w#@vA$EI&cjM_1TUVU%Bsi;}v z78#rYw6q1?#}TSsZg#Qt>2t)_SNrOSCoj-N-h)#guf5KLQ)$$#_tns+va9>hA+^ZZ?yj6O?Q7Bt< z-5UAas=X7GXdT7~Z?8FSk!&B~$1&xj$nKdctQYtpL1g8KAU2%)m7-93vsB_#eD*}4 zGSA)~X5jT|KdX#Zk7L?XQu$w<#09Fa2haPNOO`?39vJOgRoA|$n1xQPeR!D`)5Qrd z#havI`Vy;Pv1Pt%_m(W-Djr5TDq5VDwOq@|tyV({d5O7czW)HfXn~iVo%6-SEsk!v zJo`$y*zU1j^2|35z2Ccmn$X}mHO^SRpePJie;>Ue+5oQ=^8M6RDS4*oe|*egvwS7W zTB7m;`kD<p7jQ8YYU)^=7?C~?=4}B>M3x!&SfYi7G3cxGL+-oIs8Rc zeo-ztdtPknK%ncV8Da%%E_%b3KA2J)L!`-uHwMo}408`&JvWYKZVM585NPoWvSVM| z?_(30xO=5gSF%+oBh6*ZXKpE_PdJ;>_luZO%lSML-6XE2WmlzpI?f5d6G$E4=87di z%PVc?(6>IDX^rPH*so}TZBZRR?Js1|0Ai6%bl!RT$2<%&{w3WC^A77&y!_(z4lcp@{N+H2u%LbCIbL&)(%I>| zT4ICGd1G}{yYVXn*YK3-D9z(VlAx5lgQfSxNezS_n7%vi4A$~e@1S=70I|>^%Z~Fg zKX)b?S!m1hsX%b)sCCVl^B15yaDtW>ES}C{g2dwfnWzXrG{t*4x87KnDb*WqPqaQC zJ7+XX1F~Ms;p zWwDeCbm#s50AL%8-?7(h{nU z(+-}G9N*QMn%a8v)4W)=R`tZ{;}b#ywcn$T8P!7+;b?f{YV_drdJH82Cz65h&e8#3^Ee-ePxp zMT(L?h z8>qNYjUz1qn!BGPDOr6Zs&&EMQV!%a%2eBbztqX`htIsg*OR27Qei@wnuZtL%Uj;u z{QgkfBUu~&0J8vxN^bk^ZLMA_?p#YnJ7V(mYu*4WC4tHLosz!EniNyqx_9*UV!Co( z4=fC1Yvl`Kv2ba}WBZo08h-CHsMnj-`TPlpyY-#G(E1zKVVuh_7BGie<|d`u*!s0A zQg9##gLt-D3DBi^@f=Y#qVV--BE=;;;+fHgSnO?*1gGYcnMtKD7stw7azf(npA(`K zgPzlF%5IZbYf{ZUJ9(V{0I`k$+s~`XEN9Ng-CTX4KyTTn_L-en4mkNr;ea{_>tDYp zpH#H5UdJ1Qz--f4Gg~Ds+OXpojT^dC`cwOlB*vps?u{#t9O1LNrcOn zhg{Uj)41f~3tOvPFc{}^2sf&qbf&BinZM8N!pYe6PBn1V(@il~Int(eb?5h(k+%ly zD&CsZ+_Y-{0HsZ94POlN$7r{J?R}>}SdBay=2+EmAPx%E8dxgcrQ5urg&FXfEVQq< zj&s&$Q+|eJ0>!^3VBj2T+lH9^B>SWAeJ2}~`hBL$fKk`)55l##SM30*yb2jg)l}@{ zs90d7l(p7*BBN5LFG5?F6SIgg-8!FXdLKEbDQ4Wtvka-8P?UVjIl6}hxrl088~qA^ z84iR>()>#+HR5g~qgNQp4q$g?W-$Dch)PWZZ=@AD&63xpOmvNyng~*3Jv>ZM9XmIffTO@J<56$hXpL&)lkXfYp)fIBAY5G9lQP9!PN70L08sW9-hL(D zm}+fBtM(GPl6mL+{-$nTWDg!9hPul-$4HwCbr-wD)KdivuWs&Fo4$wr9qe4EtCQuzP7!GN{HE|(?aKcBqBuRsym@$*EDyGhwf;cg zb&N9f{QRLnsq>Sg`ly8h;1Fx2*%IMrLH*n~me}dyX8cyEC6Tuh$6S0L#@`!oP%@d zJq-5~oL-1LeuOZ~K18b3!E!+^T(#7rs`i&^WJV=8r;33gfuj~Z4^B#n(>c22!g{;v-_TpQ63NJ6~GbN)dd;XI^-G%G6VT2A?KVPX6so723 zq&(Pf-N&5vm7-B=Z}JL~H!0KQjYjC8R_`3j)rR-O>s2tJ!~Ff`QO+HsI0=5eZfV-I zT;wXW=wIqlT~hsz-qQq+VCZ<4Sx$?)*iKte9RKiIsRB zo}uB}E)l@_n&%U2Tp)`0jm``3;LGhReqmmU{KPxyWjTE(Z2th&K`5nE14SzPpD9Z` zz*uje7{sqmb)ZwfBL1}Hr?=hnbk*c|43yg^kM0};T@Y3%nAapn$ zYvmdPX^K5N-)OR3sifw#eW3M&N?tQ68g&If}QM5Z)VuJ*zJH1g9N+^=I?=kCB*>j0^z z%b2>htw!Dw%D5@QkH1f(D-ZyHecgP@OaW30>W8R>u6}U9{Nq)^zVX?qmcH|ir$TV# 
z@2F!hvM~MNtnvHiHv9BC^sXbuH~aaDp~)LQh$&uW>(G5HgE#2to7N{qK_=<9Nm$S* z14MC*FIP6g(biGJHrZa$O&8uGj0Zw%%&@e)*Ak;TVXe!S<>^rd^1Od&grOA8d(5jR z63fW*60rnqYsA0-sZzq_9`SZ{)R_vmnuu1_5#9UCo4MH~zvQ)Ox4M<^-9c9p$C0^r zHJN%jn-1X{(o;1zGMEx)YJw)x|GV4!)Z@#N(42NO9hdtuvKS2xrcZP)>^Go$Ifcj>R-)=1y^d9 zK7F88)G4>_d_}cB>E(pTZ121uZm2}1|h1jn`EdUW$AbFH07Ipc2<8ChV2j*c01T54yJW3q}f0EXv3=0EWZMDp@ zoi%4zhyzN!K04K{c`*jl1;VMgf=gtFw&axq!+so731 z$`E05+pft>cxOHGsJ(9HNx-) zIzRRa&Ktiw`pYONvZ&!(*X|*%n`zzpsOzTStyjCm#i3CF!eQULA%9Qavp@p{$*wLyb9j_Z zp3tEa)i42NV6lL~hFwwTh?b3F$(m}O^2T!!p($1O;%q1RA#Z9d%y(`Yw)x{yqaa^@ zI))AhCr^K9&FQ@yP66)y#-SNxregFkz`z#9XDc1|f?&Vk`D#2C9&T=d^;r&Lf zT&vGRui|K^68rg|CrA4+jCX^?7Xq%=kn8^dF%p4BUfq4dSPm6BEYcT18%XMYAJFeO zj~F@Zzq>EIAasB@>onQ#?+Zo!Jp0F{tlb{Zc#p4m(KyEDQ;~a^tjL=pzOyRtGWq3k zprA&e8s)NV0NwP9MqVDc=4QKegWd%;M-6=?`DMpRd1?=_l~^Tv>ww0w_m$r0x_;M8 z$A08%A4#z3MLKZ|O0u9A^Otb$?Cb6Mh@^ntL&x4L?{2Ahf3m_za~)5${ij~d4zbU+ zXJ>U~Zk!v;xN40PZ{ynKgO`?h=`$t`CF=f{U1F(bFk#bvVx-F%P;;Jf?J*{ZJ-+h7 zCdlu__JAZ1cVooHOV9z1v&0S~Z8twzsa6Yl&D1g3b9sMkO)9t;@ItCx<0sZQB&uuB z?;7p&gQeXHE1sB&0`Og)J)v%UK7H{M12v87tg3}K^?vg%ExAKOyB+p%JABfPGQ`uo z4$uhZ50Ye0$bpPrbWX5Nl2O$(PWL;d{SUkpf+@IVkS2^v;4^W1h?;JU*4XV%Zc)Nz z0&Rphxyr{#qU{4$IN1raWx<;Hh!qnu3&hTdqiS^NEoRQ$As}1Xl((3haT};Td&O?` z#myrutjmWoxZ#eiEHKvE?(+7{_bC3LrrctKR=sSU&<&8f?)S{F>A|1h_6h|wb!J2b zwKX$I*92`M-zp49V(084rm^tu`^Gk>Vc%%j>e@Mq$jVeqN};oe(?8kpLK;3=d&?6i z-e&^nQlB`gc^Q}}^qbrY*%IVtoXl{_fJ9Y{*~`2vZU9}eN0H2p>)xZRh1LC>O>Ra) z%C8&a=__96O`q`o<_5xaf3XxcPUb2Oj_ydu(r>22e0%DpAXwU%?abV%S`f(#rjZS` z{>|*xh1;$-DU^d4z#Kr{?UeVH@xx%_c92qv*gsW9vJ9MHd(n=#m!j9PZ&wXq)@57Z zira=`!=*L2wqGyn%;e%4F)p=ij0Gj}>A90-v(lv<4PgHOumG#Uk{c^e+>WmvmgKFQ z%Hp+=W5lGtw5oC$8S62vRc{FShDCznuRS_O(>Hv|(ew^wyo^f1J!&5ndxWqC37gDq zp;z7%OFhX=Ax-WM*z*YvpIO3Zp#ag#?HU9x<`TV;$62-l0#$;AUVTwGko4vTi9@GY z36@|-7Wsl@L5aPc#^oUaa51ZkU^|JNSiF*VLn-#ERK|7V{DZWZ&A(DH5K3qlQ)Y+S&v}sr z#^_X~iVgz1yg{rRcV(|IQiA5pV^8neD^l#h!D;()9VT%1VcDUOrjBvBR~ya^ z^_t8u1&=!$tIfXB)Lmklyw_NZP_mw!k^N4Qv`Ltd3AU?nW*4F=+qdJy3!#Rn*|V!8+AHOf})F=;usv= zQ&CT}DzJ%RK#Yq|!!x?k>QgzGdqn_`R14g7dyK>mt~mqEn1CuBRCkMO;##e8N^GKO zT&AWp8Ka-X-hAaYMnaHsbo3VrO7sULN`}p}*#gS%L1zB|ev*)QW4ugEt-Xl$2+B;p$-GTesCSVR%OVSvC0-61!KYyqK zflSaifdfl>uijp55O5Ime9%^GEvIGfC^eTRh{>|KG+fHr8R(j1LxjZXaoUTQUA+GQ zajb7r@M&OC&BR=nC0w-@*8I{a6&-I8db{#Ka$7fBGyrMDP`Ys9<8pGMT)@;-vxpMA#fYex zz_=YzxmW=U*&T}PV{m}qwK7JfC2qf|QC4*KjoW&-eqcipE(0Zs7eMMdw{s9?*?nU6 z<5!?{D1>+DjKaMK3WSYqSJqwa4mmmfxo4!cs}NEzs#WwEms5eo@ddYNX=i(iLg!gG z;#Ug1_I*6WHZ{7^qAc02uuBG>A!%`BX3Cf-Z-ajDNHaSEJGhIzlL=x4O=W!vV6wA4 zh^a_EO8&7eoO=x2Tos+02I0~dx)q+0)tlj!T~UN{DhO)o;5F-lqk0WG#8G<=n~GWV zoEq;i#%;$i2wy}Qfk3TdBv6`I)U@-6yHI$Ue4d7Gxw1ZTfMUzH>G{VF3)`%`2aV6| zOa&F&WxIdbLop+#Uum3`FHLpC6MRF;_4%K zOa}`!;%BxxIpPN!m%gQA3f@|jyA-PEeLdinR|=w|hD(?7pWB)GTw&hlmk@R-%t!={j~n6@#P;L=O=bL7ydv$DP>|a(?f5a-iwNEkngP zfz*RAwA-1qUq&&%S>8hypj5-{I@4S>OANITQ*{D8_%H~3#69aB(I)rkX`MNS=QBP? 
zNv@ZPQCRgm!pY^RH(kysfGG89T1OtBk^zSm>+snp9SSD&Dx1%#Sc*_wW1v0AXXjeI_B%QF@l(0=RO2 zu(tzRU^)xQLBAeibgNiSu&Y*HkKR~#tZHfy05DT|E(Te{_r%4q_m=>4T;E^cXpYBG zoBjU)lGH){sNt4(o9|randwr?#RiKaWv7UM8Y>-QF!FLsveB321p{Ki(SghcO20*q zppnHexa>z$2wAM=Vyana+|pB&H1FL(dnuMuyLp$&Y7lx~rQMMVm}Rjh5pxvbLE(wg z!AfhaO2?*9qUX_+3yQrQdJvm>TT1-S+36b&!Hjk%L1-PzHv_DV+`$S?D+I|b><&;| zc|c9zyW+b_R4t{aUzAZzJXpdNZ?OFKi$=FmlQL4-WrpQt?NPZ>lb_$TuMFL@S;{Ot zdo?>>`2ji}2J_d^f`F|-p%4VG>u}3lM@#dUF7%Zva94N19ftn^TZ-g(jE?!Zcd4kr(jAQ8q;2GAwvg$Q9 zwGr?eqOX}q67AA7XyesmVcgyi%&T0^EAuI%P2rV;O>*PJE{yUx;$%i6)aC^Wdcm03 z&vYGt-fHF+R{F7~OXg5+{&On|v)VZ42OfxCcQJJ@5W$6;yxiUw-+XnP4Cd%ADj>DD zEB^pygwoYorW$cP8PzkUev=A{{G%4O&bg`Y7qI}#6Aojzl{)Bx$=)VwaH=iCbm}-> zQ5@XrP5RsBnH;6U)eXYsq6X!3v5>(N(bvLbiH|XBDbx~*@p>^9vb;*ETXhA7Yo9{L zqn}e*c!W7i^)Q{2Jsd6PJ|MO*95(lkfZ%_~>7mthKM++le7ra3tm|aV>H~^pVX@-Z zET{n6PGaD7##a-UL(LM*$Z@k(?JmW)PHXYvR}TkU{-so*A1*z-O-GVARH=BS%uB+r z)3iv~3_MXTHXn&}djzg{23cCn*WCaEkaUfyL|jueXUYl!X! znV+<5muuc7qmzvNqAvc)NZQJ)nT_+%@fRbLtz24g`xi0`O?8!-1IFbB2K088z|v~3 za_MavyneFhTZESI`lUw$E9IFq&k&g4vJM*b>O|<(ZH5tQ?#*<*t-ps_jl8o`5( zAmprT=P)Cdz2i3n*72IDS<)C-i`aYX-XOsJQvtEZXp;?{c$aPi&pPoF7r(?WCgw5L z3F8z;B21KYsbRs6?aK5HRPJQIIW_Keg}yy8|F6OKM` zZq0Uc6zn<;GQM#Y-8b}2w>dM8@|;xQdb(}_Lklp7_u@FQIuUK=kCa-9&X^Br(`QB`#B=P7iTr$|FURjxe1^eNCmr;f&BW^AuRFwiN&`g=;dT``D<2yd%W@28ii zN!1P){>9wiwtUOc0zb?i*ldL3dX%(OP*Sf6@~M2jvX0}f1)5A;*K+X@jM5P zurB;s;PyF#UVKfq9DcPIl+?ylv#~M4IOLdZ_x`3eUU74PpzF&s%ekej58v)@!5h<0az_S}8MtaW=2^**KfX`;fe)_z z5xy>E6scw&s3?!=Ese)9B5Cwmx|tJD?K9{Fg|wE-G5L?-C7!uKA5-8XFVk}+O9ZKP z(w`A%4g|wUVz*nE3)OL%qaBuRUDwZ52b1qAp|YQ8^SO94e~HvA=QR6A02l8qGvF^+ zH0Qrhcxb9~(>R;yUDA}gAUIjAZ=_+gaql)e#hdhBqG&}$#P@SC`JT`>+;Nz=n{+je*n2?eWo)@ zHJ_`@St{>|ogX4uVWv6f>n^Ka^XI5g%Y7qnCL`%^c$m>}!9whFDz`_1#gT5mNA1fX z4X;wzTowm0(wM1V8DQHyvnmu^DAvz0>2mVqgc^mczY zG5F6>X;rqfT9~dbUq&3u)_tJg4RyG#+4DRzdtkFK=`R;=i@4wLVmVERiiw61=d`_c z)v}%jY(pk4leFh{cYsyh;_JL@8PTVYd7aJi=5IH{u_?EgxMxK=^^QJXsyj-erP8J? 
z@KiZXyv;MI^yAQ(jGq$&9CkKmQ+$18y-jm2ZOqKv_T2vfo0NQCiB6Mfjw>wX*Ruyb zj%DyhbB{uv22;iPnVERkNcxah*{Ni8SIn(z>rvd=4dUi1mL=r4Spq3)E6{6ip?7|Z z^lW@MCzDQya&a0;5aa&<0cUakBLwcxYmMEPrsr)E>Mc6oQsuUS-}YogUrk2w?glkQ z4^7JqCHIQDK4V~)6fM=vCRuYd_36YpKG>I5eCDv>-$=o1>Y~5t0sPog<8gYBX65>I zn~psfHLpR;a!2DGXgkZc?&7<7=H;%Qc*J^3a$8vA#QDpFUUp08(~UC~nfH$o)$=RS zr#GKOE+?pTyu|9~h~?zy@PEH(U^G6xVJbB8Vp$>Za2&G9#Jo-kevhe-M$0kZyj=PQ zUZHLO05Sd}bIlUA74ICmOv>hIKO*;|%an$mQGU0~Nu&!StzF1>xdJkNh!66Plo z*thtO(e*sUuJ`CV!RYcCBz@~%i{$>(7q3*@wN|n{W;jKciNs~=)%89|26P_)zgX&E zn}6md4d3)~+FW~ViQ@qV@40XGjeJ-^Y=7R6E zO4KxtT9=h^>GIdwR(;UU5mok%9+&YxuhG6sm+ECQLJ`05@%)*%*^BofNVdIx^X2Pu z;Z06I6Fk3JymJqyrf~7ZAo9NQ{{TeLdX9YvdhzId2k{O1TP|ss_Sc#A^xwb3UX5n1 zyZRZ0OmTNNE^mI79D0cH(k;cHPZmW1ljw7|Ihd04j1hYipW=N@OT@f--=nCkzve&0 zdiaK7kF9z=`SibCW^w9H{=Q>nZSW6XJ|aKEx2F2agj%um;%Z#H{1txQo@dw6y0`1w z088FIHu#PD914E%e=^6#zfM2(A6q=7dUMy->zw+J!GH2!%hzm8Vcf0XL^(YVhmTj_ ze}`{_#K7XC`%};G=hx4sD+KX7d}DL}+5ij#0RRF30{{R35SojOrBRthP*0(S6db+%n zksh+0?7+X(Mjh+dv9>!PMDEJAidU7U7}|Ly14`A+y${hZv!brXp3)!2n`rdx^erI% z2ulei#P`1E4NFHacT7T_%s+QCS@J~86M%Ad9bv%fNnv@heW{f4tQE4K@CGa45BP2`=_=48q!HXa(eoV=D2F2@vi z_zG0bvHLUz#3a(ms#XP)!t=u|N-P8CXurCVWrPqxMi;(Mtd+Hs$G6b!x2wshQ}>~t z1_hc{1B8o??=YHV&Xw9o_N?J=;DWs^-7FMF2p&nQz|kBntdKzNQ3#9fpw*~!tJiSu zir3Vu&xigatKOXbeyf<$^TVbhog9!u2sTZ^P;1=EHA%S6CQy0WZL)cmVJ8qq7&`v| zF3IC7uvekMvcvoLx-)=}dacGVM;AIvpx|w?=(5IbFvNbp_~>|cA@u?iOQE3|d%G(f zOf#8_18}%VB1@?RN$Cu7$M=_~JB;b$X|p5YlO@r>rYdiy0%JIgPtOPsA#8nau3gO1ZM&;=VopxND{wxSi% zo)<>b>(4y1&vJHH7obiYR@2I_O#J#Cj&Bp<#w8OVf2`R{Ga{{ZIIhCUCq z7NmkoAOHiqU!);0D5X%hmOwtGmZ8W>RUwAWzl8{i6||FL!ii4f4~wGg$*pk>;vj+u zDu4@oT9GDAwlTGXx>XnH3t9%ZwRz@1B#}%Rhz+ZGv1l?KMY|`$Kp;S9q5{$^_bNzBs1`~H0-=T`MS77UO+`_!s1TH15&{V^6ct2}7Wx$s z5)cRo_~QM)?|pA_z68RuXLfdWcIS6?XFMv}saJMr;tSbh%O;7X-sw6V3JKExE`h&f zAOA;H`)rc9rT9NnoE}k4Z*M++aMF*n_I&&8GaC`F?Y9R2TA+~ylfs0Gx{ss2+$Z*q z%23&H!;&lVH>kDo3jF`vR`KGt`zb(XABWq%tX(&9_psCHoB3jM^WStQ2#>_`^T+42 z%{-0z>6+7F42pw^{}-pr=R%4fN!d?0wRB&22$Eb2_FsG&qEY+AUQ}L>`xx`tUD#n{ zZTn_kEB;|1H|XvObGda%aZ#?AZ>5(O-0ZzgK&$huvybl6Zq`JYm1RW>vA(71fAv1! 
zad=Ia^y*z`>&qvR>hiD0&dGiXSOEt=o>2bwBmUBTYY3d3mCcpdYqakU&~F0m4nED` z@r>inr+;x!;eKkr?T)_mg^=}8#MtHXLhjJ5)<+Os(!poD6Xi!4uT6PNUWz3C^S67* z5?{;VyqJ<@^+I+0>ag#USZLqRQ9rsoK&EB8yvaaC&8&K|@_PhrVyy-xP8G>j*Igh&n^< zZ}TAh;lLyKm!ZS6kJC&PPd-bzi_e|b-<@8%g8Z~cQ3rnXuHwki+{a)D$WjDDfB9HqyFZLDNpjTMJ5Vu{x~^Zz|`H(R;Y3lkV8{72#*AsY@-1{+{>J{b*Cis(B*cJ>1+pb^Nx>lhpD4 zs~XzAgXCaB%tf=&Jeke|GM(Zv3p-tv%hV?#}u6T}oVn6-T$F53YiV@qu}#1NPt`@dkl9E+ie)$(SswcFtk%m9@1BjLtL}ST z;hfyKRw3Jg2L6izntg7x2>T6~Z(|yGh~8b(ydZlK+n!E(a9i(7v{|QAas0G6rnBZ+ zF2$LY^Kn;?vB;%8f9kA%#cindmG=q+@4u=mN@PRYSOO{~E%Yw+>)G1n&v?so99y@; zD+`;~Z|=BoPYo3EDy`j+srHwCcQ*P0cbvq7;3ccvTfTR=1ESts)c^O93;$7))AN#l z3w{*wY|ZP&?r<@;W2yzBq1U6wW)Wtu>~8lQLnBq-D!Rl_RLMp zG+)n(*9CZ1HnBaK{s`6nvG^1*RIGycXd4wA*#@tVzk0I-`ZW9Hz3;6-;MLciTM`_Y zFYL_Z2RXinUN(eD(5a19eKq%??Bpouc)BR{>5`N8J}Y+id6f4wP!-;K`SOYTfs0oq zP4)i~7EVrEjN@I#VeDI*YD-$qTzWOV`7Tj4W+s_KKdk7`TG9l}d1?D^3ocC9@lDh( z;8l~+_iI5Lej>gaH=j}4s;`f|I20d{)-G}O-2@A%e6_7RMBzy`=URN zjUcph2gt1tKK-kEK|*%b#|9i{S*n*qiu$H}MZtpL5Y$WTjB=?P`V{1~jY_V`@YNC$ zI3opBPQ8P_>G&wbX(y*bxRI3Ejeq*XtY81~;NCk%>=YjVK|&VXs4L!I*nVv53%Y{3pLkAD zujsUR8b9hw*}INq0rFLn9)FdMp1c#|;q%&K0eU-E&YIgypf<1*#%3sWYmoy!?58IH zbacX=W$Z6Uh37^$-W;AbJ59_wJWJaNcc>PPe_p}I@-~&);r<%fS9GFJJzI*r4t~@Y zTsAckZ%G=!o^evoH9oFh|aTd(NQJNm3NFA7?CzT$l-`y;h_ zN4z1Q#~{ZeeY`aKbBv{QrU{eQly zZjc34@5`jg0OML7z-bFEA+=$jKB(nV&*#k#6#esZs(-Khii3mNZvZo3Y184b*(7u> zR%esdGH+|TZwf-y;;$DjJ0(>`UDi3BYn7j&9{C=`zZ_Brh8S`JM z_dEY-*m?0UER1z#| zPmFu-Shq5$G?eID@uRL6ma8sZ;fnVJ-etL!%w7?p0*?cuoNGg@GWxr@0ws2MKM6A6i_%qZIDE7 zq$K#3vO4*C@IxHjh-3qne%;culHL+LEjWwWI1yM!5}i1iZeRz=w|gtp=ocrF$55sr zM9IorB+P_+(quLF$_SoacwQNi!#Q3g$DpcJiu-p*dYN%s_n;8`SPFRj2=uuf;@36ppoR|qoQ^%-C@0n z(`sb7lrpyDBjy_D2tayWCv03p(v<2iO;z1WFi+dG0-omon&MmJHxcKuIT7R=7$1Q2 zf#1_sqm-R;{elUbtR12@PfV`8H1Y`EUAI69x92$M1|B){4lGDq;j#XnRVhjSHwhw+KE<9XYZ}w*o??%7PrFKamwUy!%`>Y0D7&h)(k%YX*#^ zA#R_WZKzbUF^G}G{AHHzFec1PH?^BRNMdV?;;wA2fEZ<#-N}tCJ4lk58f9))crjEg zLN+o}z!$Gpyu~gRh$q-ebdhEKa6~(hVj`#Rgzub7pvCcR#gO)+D^rnS%^-qnb{dx_7}@BfpveBFyKh>x}vy1 ze<_86O8N7Je*r|&g(1RfCM~n;pzgkZU+J0mr-sIf-vFr@xa7-Tf5j#_6Z~44J^M z@^)3(uFZ3oV;kIlY|M8_1{|D|5-L!=(<6X4MkKUPQ4JffIWM;?jjelAUSh`$Uwov> zjdIz{E^)83f`?)ZQW}K?3o@pMf|Cm7FysC`MguG5jsvEV(^|Ys21Zxr7MWEcByqdE zib#Z9pdn#90fSJE? 
z@3Ph`eRdaCwY{4KbDC?%_Z?l&LI$LTpVbK&^nXkY4cg^>3=qFtg|XjS>02c+kYgMZ z$XbQXkE*?Kbl!?SMoJlP$hu6>*tfe0ZmL2yC%OsDnZvW7ups+gqX=yPLxyEGXMSlO z0j?*`H^_?$o+3BhXhGc!ZTU=LpXldn)&{~Ap+mSeo`RjEs62%UY_@?I-&GEQlEo@b zu1215sS>5 zrCilhT&lv=(7#}n&RuWoeJj*>$_`xKlX`Z=!>r~L3cs~Hvc~e;LW)23*$RnFzl+uG zYjYmd_LE@84>Y6^IhF>+xhL-+m@06iU3raNzoN~X3N?$|KU6L(^c|Dac=0_`Pot;Y z2`CW!sWB;mNRB7xN9$VQkU^2GvfRoTZgT80o z&>K@#9lrsJ)2u95pjW5@kPM_+wB9lE?s;dBYp4AsH*gyk1Px&~?rE3~O`2m2O-uNYXZuU_eO>j$Dd9mtC4Y=LtN&o%I6g}@3T-N z*A33c{m?FLRQXv?REwQn{;(@I^>_lDx05;M+o?upNQ@)|w~Tq4y1!{L5P2zP7R?VFpbw22kCeZ;cytY*9IA~`$AA2e#NRq3p@@@|1y_3O$}sg+Lx zb8Zk1JL~ug)sK_3^a4l^`t!8JVL|HWL4iOL{l{V(HCl4U7T||((4I^qe%jV{6u9=) zCB;ggQ&v16cRa!TWlCdd)umXXJYepI#_;@$L)>mu6$hwtN7N)as!C)H>@<>vF`hVC zs_Nkp!=Ycetq#ZAXqr5nsIg0T?=#FS4;S?g8ww>9v16`srNt;h2Cf@+laAu(@@Y9@ z4{_}11Lsmd=^*+%T&3$cr?B~Y0dg285=0{Xt#FWkTy@;KBsT2m8@K1)^;hxM^0j_= z`Gs?MEr#!0i=r<(j*7m#b<>qucUmPd|+#^G4~4b5VscF!+%8PSFM4Ior;@xF2VAj=5u;t(99s zxQ-WwRH{@8{>t1v_TJejm(-?iN_yFly(A7?>W}H*Np#981Y2uQ6t#V6-!KX7`~jv7 zuQ;rX_Y+4}d~rxY&CazQi%{P~Y16YaCcU(&nRVe!Xj$)b`9}t>E5owx+kB=DQv*E} z&)$Fj{;j>%;{w!&_DU}BSz>g(M{B0(f?>@f`RsIb)VruE%s1~T;^Vc!YNCH=&^eLGsQj{mNF%D-Ext@chzE78#?v%;j(ennmHTthwXV>X0%dIUzn_D@SBuVtYyZ%t)N1Hs>rf-sMn~WL14N#` zv@RLnofE#frDIe<@O-6L8psZ3Ex3L@vkYISF_{X@mWJE#FJP%X^;o4gI^HsC_v;D) zo;uleJ!T2I`-+4t_v4p59>z7V*A-IOiR$c%rdWX%-&Js~weEw{qrF#DK>52d5P=pG_~OX3V;%uYiV6TvY( z(7(*DaMUKh?%MT=4>OOFP`hB;&Q9G7a_(@tmI~dk)Ob}>moeGZyeP&ejMG_AWQ2gX z5rW1M1pmZApP5TrcQ5ktk4Rbzf8zZOSeP8-+f!)Cc>{|0#*Dr zctMg2D(kVK8r4RD%JR9QkYTY_Ii@#E#b@zFA0w09QlgL2II+OMgVP0$&vbNa3Q03OO&HBx z2K4Mz`Gn*xkHq8h$n)e2QvpjP0k?%!Iotje6UhwsB?(_c+iR1GMuA^rZeNV0F5soG z>1eG{IOKU5-^gmLI7RrHKH2RJ)O0Ll=%{ODBnE@c*)Z&ie76w74jaP+{mRl_8|$_k zX`ggs$xhlCMvMWc6#7Ec?Ge!hBfQ=aYS@hY{EHw97Cdzuy*nTG+A|yGHx97nGn4#C z#eA9kW@_L_#t^c$kr-#hQsYsE)5_>rXLW2DJJbwd_poqQ`^?Hn5c0F2tcHyNO8NYD zrH=4^2ZB$d#(r=78YG*J`M7&(*AF`*QqlMshVO&S0n=JN);w$;^pqJp&BQMnrQ}$S zoY?pc7&zRfdq(iox7`ePhPD=8Gw~YFDj%<~3{(*f={FM6gjwWE-&BYb$(&lN5QUV| znMITP?1N1B2@SF{b}8<(+kS#tw`K0cJB3$;c3{E1{zqS?nsLFA>ywf1`i~riAN$Bv zT23{`_GFqp>Qki&@Vk{k#!|Q+j9Ry-JR>Lf6_`H6ibu1|jOxeC)*2_m2LmP`kgd+U z)Rn*%v||NpOr0mbh!FAQ?wgDK!6tR{n7G#>W8J1V>>c7ARE3v_k9R|)60hU6{=K5GateA(CGbbM)BH58k_GI#bj;7ZBk5mwxC zv7qVQi->*5_w~!y*CPx#ebAHncyc6(gR4c4Tpc8ybLmH!vh!qKNEib6xS$fk0ExT| zYn*``wFfrRhEjeUWb4Ekj5978ZHH{DmPR#l#AZaBUbrdgIXaNfgoMLTVo`Fg85X(mF_TfaB)@8_SUJ#d9O6zH8LSTtoMJ{R)+kup?NqF}Z z*W}cisit(sxc@v4It{fg#!g}a!4FAq&J}x2YtJ(5&7=lnw&sL}!_W4Be*+laZY6us zIV>C-pVIh)PyGi?{STx1A3h}tv%IW`f`%kK&a1az!&CpEQ=EU$DMnimE$>C31~H$i zbB$u-?bmFvV^1>(9Odu1r9Xm^KG$-kHe)X3{7>gfE|mZbi6DvcV#Y6hR|?ijr)g z%$CkBUgR=cWXUah?wsg3>r8y#rf-ZGt3lURoti(Pse}RP*W;_o)9GGp{LPOl!WE`2 z?!qlb7ZanOvtIl$n?Ga%>Etga+p0?IbvHmsvnB$Xrp+=RmOm+C4A*~@A^WMSS-Kim zkGn#U$^YF2YtA7eczPMrlO9e}_sMtBNPuM7u^OGf6_#jm;b&Gvty6^Z&(Z`)Bb4O) zo?J^YZc!ItBtrW}hF1Al z!j7Mj?~u#BnI*U6xi}CKg5OBRV`*RTL$j~z?CMI-M)qFM}7<0@j5TAQ^qp3|`Hm3^Ir>xXOo|w%okSm{d zuHRku@8slHR_2G``O{L71q>P@UG>L04dSCp)1J5!cJJ={WFh2qonEdb-*N+@NUttj z;#Vny%^F+U3+$B59XH+wqH=8;ye{YF*5zsWZq)%LVLlc!*A#rbYb|myMyw5bBQ2ku zCE42M`_m#9S$6J=bCH57N{VFDi>%4U8Cv6FgK{KI*&Z?aCADY$##?Q@r^)BLir84w zkPp7kRxl^U1qDt4Arldj9?xUMRa5O`hX8MtbdW+BdfO;%8#vWq+*`$)TVhqwR10Zp z(YXfIYhoW#Vb7Z-{1H88}`l8}~&3O_W_FCj^DDf)(tB zu>srQii5^KU@x=C<)F0DX|~9g{mTOCSEB#=)bnP}E0?x)7TDmM!5_pe5I?aDY1m6b zE^x9RO7ODv9;+-VIT2Vj5VK}5p(wk@4rjtOGxwdQeuFh6y=)F?pdS1RFp-1iAp*N- zRaI|+JX91b>fow5(_n^>keXAL{&{Da84$_Zx;I2|&3Dy`b^g_aFcUzJt&U7r5e)#W zOmncBty0}^a`4b{BTMDeX=dnz#+-XNPJ>)lHs5KKH;w_M*7Gy*fm@Aw&M31r1LKQA zjM=`sGa_%M%c5{9)6m_{+93~-S)~>=vmZ^2n3|oNx1x94nuP0yxZCTKguymO>y)V< 
zyb>!wjiHseqOX|YPpTO7f)2+)m9Dm=Qtg&fC&FNPw(bM#fP;36W|eOEd=PXTB6^%A z{;?~3o7xJdDYKnV)L4?@6i4dG_d{T$a(@=mtfe z)Zo9YK-3QBpVnP`?gyr{RMf}$FvBOj5@7@8LnFAw7Bi| z=J^shSw&Iy)JOthn{#+Yq`#FS?c1@Y8uC?p(pSDBPo7p+9W?R9Pb~duThvDC)zb|= z{o;}q3TYxXC&q=JHqRRG$ZZ3H291ORyTmP#vvJE2h-e&F#!h^HB0<6D-Na<0?GFt) zt(m(mUr&0@_Q~3%Y?wb2v<>2#d=k07D}7gGJ1T-Axe!iT`tfyYOI+=qvP9FaV))k> z*?HrvI)DBy@xAQ!@~BchFGqKSF{(;90K>)|>NyHHe7ixBEIet_rTBGcHSQbM#fD~J z)duu2W+fZrdN{uD-i`{qZhBFE*+C=TV=A|wPid%B!W@kf_LKzV33Z*jxif4Mbhl|& zw>;qco?}!ACnhX~V{s)t+&#zWn68cvCY%V4B@E^Xi^raKbcXf>0p-39ezVkP3bsV& zjT3-7JWsFebht22T1S}t(l&^JIECEAMc98H_shUX3Kv~)jjmv;Q$&&EmSO|{MoiSu zD~;~i#vMIWXl{ehHvG{k+hn1EhgKc$+d^U4dm1y(R?M}eb35GCsTt?V!>g!|BHO26 z+v;}WH+~FjAC61zX*>n1eg4H-w?n^An%ga8rVJ>O7q_(cJ9Tl_d$5=8tjLZS_A{N6 z;lwQt^gH}cfIc-*S~j1Ps4e*@LUC5$ux{390w=T?nj3v$Wh42Er`T z58DJjgBwltH>Gc#40rpn6-#Pdd;Bctno%?c*~FM-y^!f|X>v#&@as0l<@ZEuTgZmx z2K=h%FrVZ6RQ5RznnH463f~DHNlKRI_jsKQ-OsPIxnq6}R49AA$Ut|8l~AcE?uD<{JFG=wXd6G?2M)zWqp@^Vw4Z$hstk?YNI9IUEn< zT&x{To|&_km|6K;B>E+ODA7%BTMl|Via4R>`eoBeVOw{CsrMOBHfkZcp}P1@JhdzB z5qFot^b+sI2R@&_jS`=~5KNdY+LOOfCY-zQ2q&4x@2+C_hL+Z^u^pz+e0!qLCBI|b z5Vf!O<)@y&2<_kA9Rc_=MR%OPtb3GB&EeWt_-%Pckn@0*~C<1eHbOkokAMk zg;3Uo?FGyPqIBA-)`|XY#(FiDgybHM!lViH==T`UB-VJs7HZ@l%x&8FUB0GquydS_ z(`0ddeawxVPHfibgo&(%w4e{{pGdI8c1-v3a)GD9E5$aNFU=AcYP%~NX&AW+8PPLa z6G&Zg`l$-uYvY!w<+F)VB^zNB7o>38!evF?M^`G`XL=G3FLYfxDiL{W_Oud~f8a9J zs=trAR3+J3xpT{GGj?byO=R#}T!8qL8*be$?!d4vQcpUEx~22V%CNXIHwCN>CkWf9 zOpo8{)|K>28w6%}z3x0a1oY4N){qN^bOoVPO9qNz(o;=7=y zNPtNgq}S5rkT$F#bh#wsxVt&X%6 z-c{z#(xZ;r4J|_PIyK*W@1dUmoAfaEb$W(qr}ByNu+|0iIGqJ<%9S~^L=9SsJX^tU z6q)AqRwM3uMzYT)B4vNn#jZWxzI|{Ov!@f7s;zSO5V!7R&^mro&&2uYse$5n((4C@ z&hCn1z)`mZJuOhIAcpDD*3p5;-=w;KTl6qPB|%n3Q8$M7Tg0;Z z#!n`wR-wJADh=^2rhQa2rJ;1Z==gUbABtPS9zxY*6``FjG1xGS?D@<0{o;#U!RsCI z(Kj#fy^w9;4Sd2oc5S&W3x_!dd;S|x%-I*$#g{l`KC@z!8XvK#aey7sL#H?c{k5tU zToo>vL89%4MV=5r&8-1W*PSP-Bvb8jTx(oq%M?#kLc@uplK% zh=D!}zwTUanfL3gmYeu~z9nw?X;W-b(oxRghu?r}l?TA%8H%wVqO#;5U6KOSul;hn znjuXMz|7I}U(Bj&QRAo4!Ht7uLB-ZzNc>_nE;kTuqjz6y(Znq`;!9pe)eEVVI>$L- zzWs99@!<819`8YfonM6B@JJ(iXLME|r=yq#RcJ%5v1CKkqdFJ3(Hup3TGc=LxXqNJ zqB2@$*S~Z%>)57TjS9FeMr=m%y;=?c`fUdJw3yq89g^VLEk2Nf%+|0<8eBKpK#x9d z&5m>nHR-dKb+S4n?;6Q4Gxa8=*pavlbaMD30;ClpAkoOtO@VNck>#oQb#m9UMpj^a z%ncR302h2^!_r#d#$@`f^%<;!!sJF^=Q&!AWMZxZaIAaiyY4@<3ii zj75%3Xl~xg4alzmxuD&d+3!I*+jmX44Shp{SanYoROf>FC^?i zM-(!KHJ&A>t$iOzj(QMV#~1FZp(P+K*JZ5DMKzTYN!7?wJ*xjdzq(R;bOt0Xq`Lfsgbi#T69NZf`C9nys+_ zQs(P~!`?;c>a)y$bVm9`Z*%-S8_W+|T%FX+@o(`&=hb`{Ci~807#K|m#z}hzm71rR zx~T+y_7cU|B;6|s=RB#WP(qyk#`14(IhPPTqS+^AJGO~7wkqw@Odb~e4DDd#4Hl3D zMe{`O zY~R&FPtQPfZW&&KTSR3u^=>%7hxZkOt&f9_A#7#d$|W#=cZ)nn&ZR2R&{ACs>SXTBV%Y1_Nzdl8PIL4Hpwh) z7)I6k47#3M&hgM6wCxk3nyHacTQR7B7X80U73i$FX$wdliT}nzBG|~}j z-B$b#GTmLlrP}O`{X*=gUUIZBg?dhq1A3MfXTqOv`=Y3GhSeuA!tBg&7{~9o^15eu z`YNeY#R&F8$lw}4N0Xnkk@95QI*CbDJ?S^q`G$7}9I@1I+S``PE5A0g&I;WjeUc}w zDVgdA{ItIZ%bEe?yck#$G_EW34F7W;M^%O@~Axg2pP|n|^zma=QK(R4aLF_rz)=cy3}97m_UXdc}2}*!b2z zkupli>w-9MKo)W)#YHe`B#oLWNV-GcJ5$R+>)H;d6j#xlYp*|a5&Uz<%pf)mkgTLA zu|y>M89{st_t$Y9Cst9;xzKEpm&WD7(pBc@!|b76Go%|kb&y5qlgHyLC(y)8(Q?N>Lw7JOV6Kxx7d%- zx-opR5~;5e?&ULbUWt9Spxp&c777)+95}Z4r|iytdREC<;i$^I6(U3(DE4`B;S9~MZ%?Tw=`(qU(mOFD%iBvU z5D)cbig!uYcC~%X7QZg2zyXhOM^q>@Tg9*!HiN`%H3LH4Hk#an@Ar=L&vX*1+&}wL z+;gOjss1eE-t*41Ptoz)yWZVK{GxqA`G(;4O5(N!5N-L_aY`I2b8@W^>msWQ#?FB) z)Q~Z8B8?pfm6dG!F2T{-=w|dXWWTN=KB^vcNbNX~Ao&p1o`0dtU7t!{*Hd@Qsx13h zx{viV;fp-ga%aRyar<4&g|`~zsgBgA-W*$sxPPnwcz!+Pbnq_&M|#HniS?PYT@7=Ua`VC zuM3J`aQ654-wczNo5TO zKpM!AKgP#4nh&T4dXz4c7?(ZF?nrj%OiG;p!f$|jBn{HQ*q)ii0hv{m*uJel+toz> 
zKQQn=WROD)pu|Q4*?ZOU9*fqqvA|iTBGlqPK#&8#1_TMjc9RcQD)W*r?lc@z4web` zDuW5Xf7o`!T}l?K?4PGK7=GaNH6AGUc@83xfgjN1)4Fm#+CY1I+Z0eeybg^^Na5o| z4oW+pzDGbq!TPoONo-01p^`9ji?obCbboF>gw+#G5etDyVB74k%S{F|aEWtsAtBo8 z>gpPd5DNIuMJ?ds;sTI-wo|h4r(wgH zR!JxBdS%y1$iScYJ_iL=>RE$GwX;2@NbPf^sU!U9U)^wfHUUAy`WwfHSOS4?#9b1B zo(9mZcvw_2UnUR;6me1K1tBMee3%7$_HJw@g4EgnQo^RPp61H>QPs#%4Y*0qkp0!{ zt1OMoKlRz$|Bqb^5{V?BXXAmc1qMePi8z&PI4bA3J*Qs*R^qs0habgRMrx~2wnUhu zyj&o}?`5D+d->&kd6>1_ty(XE!Hb||azsLH!EqpeNnr21JiMg%H-M1B#RXV)Q{hT% z2C4G|Gh{}^26=fwY({}4APFumRcZm)zNA{Q* z_`b5h=0p(ee?$S}l7LsG^I(%)wtq*Z1OuY8?(JX<12R1!x*LIEqHKtkP#{zs4{s}gJypY!rg3`vI>k=rO2LNkNq z-dB>1?to$Wq@+K8{zG6~fwshfLGLJj5dC4&d1DhkUm&u0Rqa*ffk%C9Br&4A|FZs| z86#%lvfN@?$!VyJ`sOJlKU}r1S)fT2JS(@{#?jUrNejt#J>o(`p-^`^PL!C$%ZU@M zVDlAUA`d0};qZUNPVi+{!0zn-SS%?iW$R(4yV8#G#rjfNJs_p0u=~g_D}a_;|W^{|Bnv1)a^@keR`+` ze`Mxnk1m<5MbE;(_SSOsq#)HE!Vw?XcR(|d-ds{FOK$CDQH-g`KaHpwpdHu)OXj?@ z;V=rA-TosfN#M`!Y*>1}N?w@>I#}AOEnVE)IX;>o3+2lcaPSQWJ?ir|U2c;Yys9wQ z42Q#!#4b9JkF9JV(5-}o6d*Ty%n(StKwdW43$UeCs_V_}*FOYD&yrFFfVhNiFE4xh zQYdwqBUPtt%TH?bx){{3_U8h6&dbXSr_QVJ!IrI-wYa1|0&Dh&!beuOpP5#rwW|n_ zCIo5foD`n2uZ|psiA5W{w|WrGJP(Nv3Da9aU3s{8G@8T`Ox3Bi(7(@2f@&}n`alSF zze2TnK~T_tB(3X1{f{pnYI=Hsx;pbuKba?b;kZ|MjhpL7br{kICysc#Qb1zeULTO^ zp-V$__6T;O6NvYu>vNdlAy-9-=?7pmiL4+^EYdGC@RnZa=`C5vW>hkUBl#Tf@K7C& z7}<;m?U%N4IyDXEPQiz;vlupj$|fM(u+aXI_k3a4J>*~=jsTJZvKd1Xzo-291pQy; zs``)A|F5F`k%>=pNeL7=2x>dxl@hxTuhEmglf0X?;23f%W&1#%B1#b`;Np{KXSO`o zBi5Xn$%RJ#+_PL2N8O$_u%ZnL@nT$wV z=ws_8(oKqiURu|c01A28TX94Ac;KXAfmJNhJRzRM@jqQFU{&=TX)w*%Vq^C!P9UU* zA^{HRp{D0xaR*QOzbXWv7i(R+=4v1mZn!75dhlu{mq03oZFm}3vM7#nqk%TUtlK4> zNTFs&Jlotrdtt$%WqtaC{4=2OcGJ=(rgIA#jn1^<8BW|vqfh=MHft0dn=gNpSzX&D zZSAZ>uTL`W+b(w@=2F}Q?$5bho`&gqKr=Pib3n--=|TVWE&EyZZ@`v$`hhtq8$O@I zSfH>FZbQETsxLEA?C$qe_0^LSp*=`z%^XS13|I&nUCAIOakn&KM_qY{Y?&Djr@*sK zm6A8Tt}4C3{|zS&3Z&;S;BY;eDB6JW)fuM^@-C}db-f3R-0r#KXvbV1yRVy%X4~_&d{?2@W z5`l{PIf|45{VjYH?pz;&S)>(fU+2?ys|91%;j=l|R7Y9!_OluwXPxbg2 z<2<`GRxAO8h3JZ+A0ZHQHRi;w(=@i|eB;QMPG?*+^$V`+gk%;^U zE~N_>FGukl@iicXud^@4M@akrl;pHTVx1x$RofU zlJwAPk;FSmPY2&C@lH};xOvXvk}1MqILpf(7vDRr!H{;;xTnu|X~0WQ>|wVtiQl1w!h5;llp}*5+w%rO^rX+D8$<#O z2k-++G!W-(rGjS)ykU3^E?k)H-r*N5G8Y<2x(Crb8z1H^IMc=H~# zeOR%6H|A-JYsOwVE-4cP|cH?ptJ{JW=z0=6qS zcEmmErXLUQeFj=m-6!n#7%hupNhwIn4L}(Im|~T?lvSchKH1u9+*3u29F!&l(S%7b z7G8&oKjKKB73(mex-v$Dx8*?K{{~~$0<%D4%RE(`_m7eK$`HvkUs7ad&5OdRekBdk z6k*K_vbH$^iNunGrSz^dnx%F7ots-QizIB{hhwoQ-i&Sbf*}BS!x1W4vpJ9g8HZ(~ zv&!c$=o9ZV?-Q{^R&PyRVNEBL@csVTJBs{q^^(S(su)!4 z12X{w5+F;?W+QyEtpvHTgl3Ieu~9)Tsd6wF?984l;7BBn$Fhlh;@cMzmk9kgV4H19 zByN28V~?w7>^FNjR2|%q&P{_NWv|Rw{V=Oo}i11^TJ7HQU|tM5^h+arJo=(&6VE%=9+|3Tr)+@dX#hA41d)b zpw4grBaKRccL~j@rO9DwB~qCo1wfl=)$#kP7-?4B1dK29TnI4cnj5DShcPjV>?V~d z9bW>Hn@uX_1A?1hutgHyb#LAkJ?j>HAYf&S%vmszU zFz#RyhB(;+R=mSHX#`~3Uv~Rn)QDJgES{{2hvFG4f^IX0P(u0;Le{L2$t;$1;aeFIZ+3?v2Lw23^OFUJsR9 zQ-Bs7>IAw=ng0Wf_UUEx>SR$d-L*^`?GqmsRb zsrLiIw$kdtw(il(uLIK);b=dRAx01kfEusYy_79D8ZH9512sV#qZP_IV8XZ3E26vE z%LjC-BV=kQoKHhIB3_-Be@=vHNO)S3n#6@A-Gj=3xHR}NJ`@h~G#{ESeG)o&-f;#F zCy;>VY-|37@*6Q2 z5_{5Mv8P7QSu{1LQV=La=s;lw?%U-$&>NI*QKCnIk==%9YCGswF9hw#h?$fSeOV-Z zU%ReacZ2UZ0YR@}7@4>82CEU!T~ysJLO;|{9lp>r2-OB&veBy`{7bAghU%wCvxnmz z+Y%=qfkQ&h1w~taj_O-FFD5f`Sn}FcD=n;=SrFjZV%C+&o);v9?ERFD1k0-id8Wn6 zBgt(az;lM!|2z`GjEn;i$N&ZJkuU}A6Gf)y5Pq(;RWWL=0z{>e+t z4eerW*%9}5Gopzem|N}f*^#iG(98z0QO#+Yd4m#(%xZCUCBt$<4ue^^V?C{q2rW5B z6qcUQ&~7L%k>G+v-_Cw}5Vw`8k%X1`Bk*Fd`H|p3^Wq0&(;(+b>&vA1dF~UO)%Au} z4MF4cGE+5B;**aGJs;2-;8qN#_>DPWl0~dzEL%o=OCkKA@LN)W<1TrMy3)B>u zZe6TEdN)jVfNlNFOA`wA^eJ0{T>5?eTnM@@-7vmz)GSp@O03=e`v)lYG+FT8hu&c9 
z=VH)qf)T7)+p^owD&;xja!+S{HWP)eZIYUXKd#O&LU(#D2+?b}3QNrNreDarJqIAT zfu#vTNPF|7-+=G9jNgD+TDk#J_U;^KAI9A%Jnin+zTbe_DU_Yo zop~ONTSchb&$B)B-vA0*t?nF{P@J150N}J*|*r*P-j$V5E>=W4tKlU@(>0z=|*;{yYhvD zBv5%LrHe=wHU5Ov#Rwuk&ec zt1%A~kT^IAndtlM7_xsbLj(Zg>r>YZy(vR5wv&HkKH1$6hXe+98JSgwH6^S>g^`+~ zFSXX#9GHrVB5LpFP=b?1H)p0{q92_Pcpn<>8MNSd!E3|7}S+KRpirQNW@Q_E^n}p9K&LWvvX~K6#9)7&qGPe_8^6TqHA9A zJ3>~eOMGV;U^*eA6f&QqbiGOazOcx9s_7g_s5&(8?*0% zr8o7WVYoIr{B0Hz3uHvJ^0qmDcY#W$4$23FYD5W66vjLx9_49eY87u)HoLu}+2|ca zDcuAXgaJ)6-o|HMj}%J6Rn9gtPl((NLsW&P!!cO)J%zz6P3*MFE%U=Xuzx|MKmIaJ z)PpGYkNg5imUINA_{J7n<#XA+zd1Z{MaMlQxj1#W08$>YRY|)5o|paD)G;~&ZUb9_ zq=8dCUjAX?ZW6(^yv~b?l*g>B_gfVNXW&w3FT(5aAdq`d@cJ3K=@)0TFUfR zLQJAnCr3M$6LE6zVtV*0o$HJqY?3C1k4BX>&G4r;#1TcyIj4Kt+QPEIv~!H4K9QZ}Ax{U8bG zCNR}WE<6M{i&C|N@i?-qF-8qCWcwn^gQ`Rd6fTUB#HHy-01~Ih9lr#u=Pj)fm|O2a zf@%V9Z;CsjIRUf227GO+8<)=VaIQew0yA;yS7df=VOx2Gd6Z^=JxZV}UVs6Rz21&; zd(Zw_U2>Nw!%lrPj2+~QXD0B!6fM3qa-`nk0bqO-m9a^k#e#8IW0i1$dI=HimV5Cu z&i|xRU|MWv+zPMCG%(V8ED` z4MpBjz0q50G|kjvzZK^(dRX8bHM(-jkzfG04+Fq-I8Unt(<~a__A!%Ov@>K0e z?AJ=yuV4Qr(;2PCHb<(A|>gb4F@-d^dGN7Y7sfp@d9ZYoKo@)VVGFx z@woKL8mBN|P%8(NCYq*09hN!uAK=%8++G(l9xKaUScW!IFJsvIQK@F`#yM}uH$0hk zhg;}|3_IE$7Wqg;xTCmlg=jDAgwPb@aqBF1;?nQ+yY!;|*tn}59aCtG2m=Ld_Sn-A zXz^ww9;_TAsDhu`e|}#Nn}C;?(6~l7#2ym?*6h8~3!0Fp{eajvZanupXD(jtkwpQo8-=P2MxqA(|6&WvN$|9IB9X65@Kq!Zb>6(f>XV>IWhR* zm6|}XA;>sA4UTzk#nDuQ>yLS^VcJS5(FwXu1d;|+7*GUF&1ai>WAf!8bqVcVrgaam zpQW{fP#&t}Eg{K%oL-xQt1^e#!=PZ*{I_`?l$iiY}U-9`@gh zi;2f4gKz7bj!zTMH*Y7zRKge~w&EUC=u}rS8ndpmq|?sMtv=yl zh}Hc#D=3zqnSgmDz$0Ntc*9X$Tu++7N3$8FWmr$Jl>{rEi6y}|FnK$C6LN};kF9~$Jhh~h3O|*0{ zoelcsx9XUl>+bE-FJ*A34#)u&+|KfoQ>JqLDY^rt9k5tMNR0Ln7 zv-zn)u2jvuk(mhXwIYu&1LVpx{BZ+dM0D3Uhj%bd2U##2GVk3N4c2HOo837}uy4I(2dXu9Orh))+2~FW6g?9Iu1; zCjKyo?MJ7+e&7j0)G0Y&o%TYG4aCW`iJ^~kLAi493^RXkog81N>?KN#n&jCNYRx%u z5qv=1QDcZAwtRyq-KDM8E7huUM~}Yv(^aZ`^o`exBeMA|XYM z!C zoN*cK6`AtHx33+&E^0_zqdd4LUTE{lY;H%-n83B8swWwm;A7bUNwSXJr@El&vzj4Ppz1D7x5MqF zQwKpT`|GV7Alewhf=UDj(5D2N#81HSxjP!!f|@5{<`kp-IdvwdAN&^|rWI*8i>Ys= z!RhqZ&)%K~w4<=i;;4UvH#!G9LVUM$*cdvN+czW*(t9AoFD|e6QmBrJpozPsyBu{d zyojJ4LSgsS*EdHi?`Nh!7Z564yQA(iIHor}Z44cP(YYkffN`%#LpUS5DhwEj;Rn7(daUE)VCdH`9*zV2ZT!Moe=UR8c)SBgg0NC~&XgA7NA!9%2 zwJ?dRs+M&r?6Mq5s|shJtB0%SWZFR^jX1AHd#VJwKeWn;dPf zRfcY=nwZ5XTcs3=45xzg(Ep zsfnZVGe7l9c{SS!y-s8z-EIUpq?*9Vbm=e?h+zTlZTQ{Fb9z^Y6iYU>5Qht(#Niec z#83a%6)a_uSVbYNd{$!ewm0iH1F`)BH?up*(D@ChEZv2?CF2N}_yn@7P>D6<)1u0r zw-#Vhad|(kEe0tm!JPb#1`qbtq+~^wZIn>9g%Y2RuMeBYgai>J`k|o=QiV`&5vF&j^I4 zwJiJ60WQfeKNGC+X&BEphnU%Q}U^V+7m{2yI}JTf+raW_dzO?nZrk6Z?JDc zQa_<(N%-hxwWkXgud#ril@Z57*SBSElLW7-2s2d=-X9#ra`0Qdj@#_8D`6JgT%1QxwBI!1dOMYSJ!+%|MSa6Sh+PFJ9-*^Sg_Ja>D3#i_4~ zyPGkBvKQA>kx{G5u|!o8?~(oGgEwY}8rY>E&19wkqh@Btc?|^_3IH zkX}pvGuYztk|=pn{-p#3Z6>jgZ9#D!FK-6!6Xs%I48>ZHdUWNVQtl_#Y+N%qfw6BJ zg7|Xi=v4Ty9Wg|SzdKbd`LEJ=&j?fQu7$_J>|o#~7Cx9%=`@^k7I@j%jqyc0h?@~2 zP-_IfRjsjz)H?SoOMDV+bD;Kl0PQjt{n!u3feVMPiHxsm9wiLgC9fX(KsW|;xM$D& zW_fVROAdty$*6KDNZx7st)P25KCvB7cMlqY-n1JscUOVUWxhJd-Sa=FU=F2ELzQlCP3|r;LxZLfLByiO5?&KjOm@F43zo zHM!+9q>hQB~AIkU&I@Cu#PkvCv)5+;!;5x+!t%7R7 zy!RS6&9mdvK(xilggcasx2YWh7E=t3YJYHI9ecyaMZTF-O+I$(AoG=P7qU0){+Z{% zkSKC%XH-k?@tM)UFp-8Kuf5uH7*5~veO#)nwf;`e!ZROQCYs|#hL9Y^&NOlF??K&)7#l8)raaf~2egeUL^v`4r5-LM- zz>O2GOMK)+uX-=QJ)6!6a^k3f&}kabB+0E6D@64_G}Cb%IzA|&VKW}aF6t1FH3=cX z@>qY_+}f^CV;-8QNYtM2LCz{8^~abViz?W)smx%q^b75fWatP}p~W(Z>!lBpp`)4u zACXKD;kjZBm2B8nYwKVw?yxKb%i5|L>`Y}==ni8UUJjoQUFCyJ_|EWLs2Q0!vz1i$ssCW(xqwUD>9IdDkM)cJwviPxo!^J*brQPI*b3gPh_SZ#+ zbn8KJ{{Y&WM9I+P96{>fqA~FVO(Ay#GN$T}Eu={dy`^!}tuGVRT&*!z{s2DWE{DqS 
zEMt|2$`$?@)Sd7I%tPfsS(ZRbp~%z0YUZdahqaDN##kVUQhcf+a7_!<(oEn>ZIT?{ z0kSe%dn1I0 zX?nWxxQ<~JpUp9m9<-n0il-aUG!d`Z{XJ_5a9slb^>^Szk;L3!Um85`Hy6IHot>6x zi1Wt}-Ba*gj@w!4+aH=6#J@l{`_+OWkTe16G7d&SYGF3)#~9v(-HFRzsto+kuAqa< zM|wBL=;mz~8x`>Sz3N|cSY_6jr7W3(<&b4h>(=O&VG8VEWiwqse=H%V7c7=^h-Fbg zPzO-^-#!nnPI&zv0B(xC8Vijo!&+61;T40LD70WPHMYwq(_SBeSTT6MxzLD^10J5{ z{`RAmAU4FR#P`1KjXSoYwbXK-dQlC-je4ZNKat+=ZFf7T;J4Pm3kg$&>%QuCkf%+T zW;~mX)U<=J^3ItSKQL%EfAiLJ{~NZS2waH#i9%o5fkHJ|_RkQ*Z)+atb~s2B^a9<`ej=*H^5GiXcXnZbCeTsmJO6X<@+C4&buZQoAqkRno!S?f7MOL;RBeiG9dNc7K z>B>|(kx!b>PCgZYiDvqsW&u^DIRo9a?;!9~sO8ZVU5BHS<;P1Cr4q3^dG3M_y0GrB7T+*8Is#rk1;k)` z0#dGYYy#+E!caU!HbR?mMo()dQQYl5mX|2qTmAdBc*3O|poXnEUKRes79e!NUOhx8 zx=<6^=UD`txk`R@m=z~$ZGDUs=Seui%%Kot2iRgYf~^FecXupR4V?l@_@e0h<>y?g z|AnoL!e6P>gh@3AnsZ-~GFAp{jwhniD=bihRB_IfTlw(;5a zFck#5UqLm_ded?lq~THdu3utk9Hx#6xOkH*mpl6He7)t?Xl!6{!mjn}B;=Cdujm!m z?l#uJUN?!_ppS6c&WSZo5uMhP!2z#T->3cF17`}4JZ6*Z*O66+Wp0&c0TU=&TYCyv z4k!G8#Mb!=x>~{@6=)QdS?F|F9hV!0wzJnTx!^)q(mqz!s;G?2SF1p-6jqjjs>kd* zR3Ri~A|HX32QSc3DF&lVpsdhJ$F&S#yh7Ymp^t(2Wo2*s3J0<%U3Y78+7Z_shH#9m z1EgwVY>YJ;uZvcLe_Jw;WBupHec1pCliL#n^1s2}X=9+rVCqIj_*{5UjF$9T)aYt&^sKO$-6-9PuMLeW~{ zO?HVeA>)x@vAPRz6G%M(Xt6g7GY#Eh zTf)$PfVoRIwD)d!sG$ zYu2*P^!PzhTPDv|30@e}lcv1FGWm?k%+x=Jmp0A^lKtVm^%KCI51uj` zLjM7Lh7|PFLSVz(@=CMud5V3!X(*8oY#S}h*iKVkxDPd0W8hB&n6sb}=6y-$6>_u7 zbeTIbZFLm3c0Vsn;yo3baDPn1Lcq-s{i$Tpl-Bx#JR@mj62urTm8irqh@cBl6sTS( z=)UarnPR%03wmW2FM1rsv@N~3R3U!dgl*+*$i*2{N^>b0?JeHSHRM_c`AlG9GhL(U zvImxr1DwWCDlf}Vz*N{Mn$Vn*O(_UxVfGYd%gLrJl9-g<-EYlf4%HUfx+cR87bUV6YEy`r>9I1LPBKUK$z2I<9aFOjx*ZGd*iv`nT6qL=4^*O@$owyMA)!{;Z zzWS?5EF5|j4~#DcP!v)f<1YF(ex(ap%%!II-$2T|*fT!D8Y#?&r&3bE<;?%aOr9sI zrD}N0s993dQOdVISTA!V`vv@ubOBY4Q8_uPQ{+Hpswx?oHRe2C&GjFqvJxk5KD;zH zX0YwYWWunhNkRXr*lR~_JNtIpX#FsTdYiNScQT5v3&wA*rXIs!rn1-A65Kywy;PQVX_*6RF6E1Kgjv=SJxP%UEhNCDWqXq zk&{q8LI|-gO&0^g(%iQ%!DPPza2f9ID(G7Pun75zQHF}zfxq6*%(aWpP96s`p z$}NC?mSizr$J5p17^~1o$2q+l!+V)XdTb%bw?mH~uSUm@xumtj#~eV+%-)PQ8Gjji znsjh|;og3-@5Anc^*p{13dOq=gEo0X7Rw$w!g~Jc4TVYC{jf&%1RzCUgd|prwt%z10ERUIsJ8vICBi20ceWrT6BzJ`|?WizJ zXne!CTviXTW_sy!Phh>{wYE&c7=d5)m3gzC*)x5POpvXTmvHmT-}Fv?)vv=%w#IWG zHd`+rlWvj-F+ zyDgK$?MYSr$)8};4FZV#tR{J@HS5||(PZLeT;A?y%;j0#N!F_hX{jBex`4Ktl6Ir$ zGIJ1ZGo3N$WtJZ8O=cHiXS)=?FK^6DC--+BMk??0pY23=D&`2pVsI?KT8puB)h0f_ zhWhIH)`6$SpkiIvu`7MhSEF|Y;16^CYW<>rzwOoU=na{Ke+VCNhbS^+zLURx2?f+>%j|(kCQ>uqzfbJ_QxY{&!i7OwXbn*+*H;1>(29rjeq|C8Qqdx4&yawq_O#3_T zNR`a9t(UzvPIhwk?$1om!)}AP~)VtuxRjOcE)CyTQ;Vqbc#^vXT}8M0%0?Yo(wv6)~iBy zdxZKQ;8@^G2bu=EcJK-MUN(7-?UcC89xj1c_N3W63D_pz*^`p}FwP&tCv}rEo5MU* zg)p7UE00MuE1ZApZg%3yMt%=lKzZQHvdSQ(d0JGQ1=C_?5${JlwAuX++JyXVYUq_I6oAO2N=HKzr5 z^&tNp1RB+Y_@v7G{rx)puKFi_xv6~8*_MP(e^eYehBIj+m4ZVFvAvBM<0uTzxv9OI zM>(j9L|;j@x`@)ME4{fekrFV$j@LBY;2ZoEHF>@>+a2ewd`_lh!J@xro;~44TqZ|; zAJO`3`O$B={ud_9u#l3X3k|V6$p4c#!umeiemYX{I_PWDW#O*V5#{-d(*FPuaHV7_ zzaDZ^G+io{`t@CP*_-RlH8U|xoMw5ejy3NSvF?&O6s(xn+r`wG!~1VI34;xyux;0} zVFL39T$IbQ010QE1C9PAUXudRbDh^4@~Fy^)XF(r)4Z8mcD7|#sUsDGVi0x~Q>{a$ zl^~foYF`I@AI**RqA}58Kb`YEX`_B~Fwm?f zA41G^fIsU^bkd zfCg7I6$k%+Dw|5xzeKYUu{T5PBnBzJW@T)eh5tb^OYtumb#7ySJ^O0#UbG9XT*S{X z)(^8$yVWoz>mEV$3>Kl|H0t<+6M1a@KiC9wi~k!cXOW;uR9P1*j2|%EFwPnAGnB?h zTWQxDO&kB+yC)`EB3<0_OXJS)Cn8}@Mxxt*e}F_5@Jjxb1WwOxnmb?N2w~oNC&Vd2omH{BPp)tw8G}({nJ^xec+u!9I59 z@3xlS8zZYmJT5;YOS(CBtiS+iSc$mbrTXbX>l>XhL+F*Po>0@p#{7i>6QC>XGqq(C za=l6Id(wXZQ2iaJXRp@B>ck80FD7ZT&I>j_WqEV+#Uk94_C4%d7W?0P`kxy6$KwN1 z<8+UOSxD|Oyh^TbXWU-GOX{ihm|Yvak&-wXc+!^4xkx%Ah|!{V)^r`dZS+nx5Cqyn zK(F+l{nZgBze-8_rioTK*Y$^AJ=;m-ZXD9NI3CwlkBv<3-P|&?E?E z3^~^Q53ouLc=_ohtsPxdoq%bolP;5G>u}uj*KWkz9X(|gqe#^XW*8~>zc%QIBb`w1 
zZGOIuvqG<@W56cncUm%dGs-&YEB(wbn`N4r*DSv6$aK0jFA$GJgh2E|^WggL1CjItn!@8$72Ts%Ld6r0G?>F@a5FSyF{ z+G$z`$0<1r*Gs+O12@oP<*bb|gr;u_8Hotbo(QF(4K=wX=IoX#NdEwh_78dhZ z3{BE5eNFy=_6<76#oXKLJdV47eeZ3b8?4&veRevcqh+*BJRRfeD5H)#7*M`C@({7# zYb5Ruw(=^_<=|xBmuRMA^sX2zfoJ?xwX{LolbMXAXWTH%UR{MX8#(_0+)Ut=V>HL5 zKepJr9`G5BeLN);Rsq7vkx19~#gH{y3-rGOWRC2V=`N`)D$x9zQ)kzcVqFYp5?N5- z4z*cms^Zwqi{+@K?{a3fvgBNz1&xdaQHDy-Aue^EOmNDMozv>jNl2h+ZGLV5(nnDi zF6OkCF}E02v!{{uPqXWJvQ?+DGPLIVA`DN)1iU}e_KNU?tz=AB8NGcQE--d7Wy4f1 z^`%|)^IZ}zH;oKS)6n;YZM|_N1iPhQgrrZ@pwGSQA@4P#3Vbb`3d$E}C~8+3Sh_fM zZs$})RAefCzH<=$iPp_5_nXh~3M+NfyWl2Ry3mZlX@H##Cy4Y;-y28(fazw7z^LO9 zp^bV$R_J_NDcY5&o-Na3YQ@j1t+Zfo;dXt?ywIQnW6R2FoMG#1;PM39_PM#}%!LXG zMqG_Hs9L1Dusbu4Az~CjvEE2YmHKXvsUIffSan}`^!h!_SX_4kZ25bW!dGn?y`9rR zpE19@W4-?Xzaz{5zdy9J*Y;2{iGgX9>Z{ifE9tvyca4_bvYGC!(cqJ&8os;f8`pP&rNu+g>IUOf`$1OhkAJe>+Z(v7 z3ySiim|%IsOvMM1t!tOVlK4=)=>UhKHOIVYqUzz&Sljn{5#?%aFD(X>&(`@X)V z7FzjfG-OsCKwF`pY z&8oyVuZ&m*yAnjMzhry5CXrry1!Dn8g}q0y!x%0fq&-2n5BbVB&gyX-)mpICMqLD> z0wX?5JKbBeL~;Fo^_?~J4Wsh(AVQdml7WDHj-S^1qBLsWUJu(%b%4yyK8Sv{GS$yE zwB;|wm{~0vhC^EICH}6d$(=Fymj30uMXYQD>8Wb=JXY_9>3&MmWBOUu@GGA-dgw z*)aPr%2rqX>v$-?0 zDMv(mrc+q(*80TO9X| zh-kVaUx2dB@_rIVlPQNY>Gz+bboi8xDpn=^Ot*VpxaTtLp01Isx+S;dZ7dL0Vt0ri zI7KGxSnI}-IT>|q2m-v%9SnW7?PK#j2yAmop=NS52L(pgdUO!1ds-uE;fGbGNjK_H z)h4w!`&>_>$JT@*GubYuFNuAFY^8(p(ERbF_KHu6v$Z#DsQ#52qFny@t-D8BRT*~= z7B9dxfECz*cinto%h8%pTr&-l5y#}KmEn-#Yk)YDb{iX^KKaXs z-AhN6w(vzTI?5;}Y2P>~8R^hRRN+u%oa`Yo@NVfvt$~qnAcbR1(Pd|@oc_l^F1)=! z6kou0pgsXVZV8Aky;H~-zV+N#@cNu+yYa|GLFue# zdm5vPh@kSX1C0{NGK6_k<# z*q%#K8bie;e355tq$E_(A)Go?6VFyTp9+TyXPnhkLpfR911NQm$(DYEc~pTTVnA5^84C7Y!e7;c8VlzGvKy6tPBpis8=h zM;3nH4%wC{pVtuu$17im*qQ9J`2(*ZzDHh{|0(uuKYx}kYsX`n&relXs7k7Kdw&UB z@=kENGDJ~AG`&jH7A|L@y`v;_c{AMBwe-a`WWd3h*g!SWP&-+V{PbrUdTt`UV*0&e zCv74IH6oJ7%{<0|TT_(B@M+mVbVu z_HpT6*?jVfz+mvGe;EG4pr;<@&Ka9tjIrYx&i!`uNKCebqYb^~X<+ez;4nOv487~i zR(kUu@UOwymZGqCZr}1z#_UqY%!jN8?6>r(S_CrSvW!k%uVeLXY?z5_vaWlD-E#C$ z%N3tO@t6F;^V%2P={z&RZd#kpDl6@ZCs9O^j9Rvymrhv`;zVRc0nnaVi86M3)b=v9 z=;c;7d{=cZLEiv;`As;)P_E=cf8=Z1-^vfF+Y`eX*ZuQ%zM`%l%b>RB(gksB!F>@kWdmbrA6f?2bs2(G6 z3OY6pNus3vgNXKmn$CtPk%rm3{FDnn&Ime#lkit$L64wiTpxT+(BzjWo06x;WGme=Fw;8`GMG4YF0jv5K2Oe14fjf zdG?3B!m|G?QUP6K%?iD)Mc;M_!Joz1qEzZyEb$l=;A+qy3XY5Z)>!?Fk4D(;T5ORR zkL%-J(mxFXy_t0ariw+y^JO&5ksOkPd~b)QE>I3Xaqs8u`kse$uJJ1P&?^o!DESs&0L1<#=;-;bi0q-#BuW zAr?VXYDp&1H);VV!%E=yu3x?7PD8!qUGL@q3ec`mKE@8^PqXS;htR&JT!;X=W6tKvHQIVokk~6d%2pUS}8?6hOYEGscJ7D3w;vku5$w|KxG#a0vbAmB^=yt1wFp@ zAAr0jkjY%AdUyAU>5{lZ*+TBPeK!1xX2P0xQ9=|O4}trA+$QmgCC~WB`|^r4VSMo! 
zjK8Pha>O-$fcWKS$uT%-Hz8ohC_Zarfg3lrK%$IQdAK)gFS1G;9SSs@QQDUheE# z7|eIvx?D28$>siLkb+|3ruk`qtlMgDxU)}V+w|x7CnaA*o(l|6DB9c))Bdzl@BYb4 z&SwTPPNP)+DjVqJn_3g}SenIAazQs))*(`WZL6&)7BVUL5~f=lT)bZfybM&GF^nWa z0yX*i{UYy$o=gDkYb=Gisa*&9*u<9t=^Q#F9lj9qRAZtMQjUIV{QD>Qh_=zi!X!oR zs978((j)^U&~V zlL-CLRY&iy-v?7Ap3&9k(c0=JEkpxCXhh;N9n}Vz{|b)n|H=$$=vgd$Hox>N>{i8_ zH)@z$F9P3L*6iwyI=~qOKXXz=oYQoN{S8ja4T(PYgDk8Z-SjSpXh0@hyGSiV@lRFu2 z+w%s>a~Acm!tJlnKy|s<@0jo7OPa1edSkHpXO0wyHVVfCZJtj%8Cbuxb1M0l1bTb- za#F$);_a9*4Ey&B{ z4bSQ)6UPEi7fvgk$8VeNbDSL(gj-9Q)X2F%wVh{fKZ~s^*l;m;8Vb)aiaF z!)P{kMX)!sNmPy9GV#P$g!_A^-JhzG(^2*E>k?Jp4$cWG{8E=!EicrP%;w_tZKRKK zOK)kW{i_A1^XjXrVJ-bs&(eOInZa$u6KwPXe1tZBxt5E-BtV!JlcK9)YqA}pKaZ^6 zu?cCt+1)ZR)q2`sG-m)FemjI=OVl|_fTgK zm(ur?a*?U)a^>N~wgHgJ@HK{=Q_6ZFzNJ=r3=7Frfc~HuxYAyqX1Ejw5vYn96O13e zZl4}<*|jc=I1%1`+wkRp7Um$|adM16gJ2&SeRB;Fxsx=J$<3GMO)X6myjL5TrQ}M9 zJhRT>u_n7z0}SqHN_>|PNFV2%IU@*q!1r|ase?6yiOETbYDZ*+EJ5CU)V{p#ceiYW ztm$|Qu&wz0k63C(DVLoQL#s_3qlf1z4Re@RDJM^U(?OKQD6s#Egk?lgM8c?B;U)dF zNF;S`sTypD%&|oP9NZGNN1{{r)7kFGRg3*2B*lG|uM|d#Zub2E6@Rl&z-*hWN8r?Q zzNpQY8z^zeGQE3)VMNSo_QNLP|9nYc-ysYa#TH%<;fi$qr)J7yXDW-1CJzM!J!4ZM zE+QB0=ENMt?h_Vm|QDVINPp8QN>|v{@m@0z07FKP%?F+NluEh z)4%ANdfYl`ub#tw{dP?a4%RZeyR(!+We>4xiHPt$abvue*%hY-RT8>}`+nT=B9W8J zPgc46dU1DeA90eV=ac|Mwb{wzSRtQ;o9pDi{|D&Kh*q^{R6G_J;ETw=?v3pmXop99 z4gnDD(0dn@tN?d7Ur9~E5hbJEn>O?F$pEtOSxDX9)Pvqvs68F&k4%2AK8wl6;Cb$L zzmA_+Oimbm(5QLL_qdyI=AS$GK%a1~9Q&qD9LhQa9U=1aP<$P$p82z2n= zzZoB4G$Kp)>0rk>ewH0pXqEYpMb%CmbkOi3uP{q`l$EE|PIUUtf@dH;ftp8s__#=o zP2k~t-QTW|H~Wio?Tfv(G&j?&dEe+g8oOplTeCNxDZPKZMk%MIz3Nhy_dXf&Da&>6 zKR|n7iiF~a^>?jp$=@0?e(kzw9Zx{ zBMETivBDZTn48gaTJ$IP?_Ce+Slnb!HX_88e(@3d*NeH+OiMU zT#^>^k#v=tntU&4Zx4>s+7sYf7tvp>gocTQ5~AVs3k0&8i(j;UbLlR|SBkT7Ut#eG zksoU7$iQ^W+FTBwvy*CcL&YZ=Tu%tTi zsL+I`v^hiiZy242+pQt~ok*_>@Cas`Ii+pyQI>lw9v*$M4q*r|<@M0O9+3p@eJYrA zdd7FZWd?FtZH^AHA9|&0pz!W%JCIZ}D32V>WD;u1FU({EiB)qA2Wv}6PY!3QR|I?mYzQC6t2(!kH*4fC zLc#%){8@-T0ts$$Z4?INym>#o^6-OeTC-#Q? 
zmUza_dk5z^VX-uj1vxNCzG=e#!Y4s<_NUf=qYy*-mqmK8H#Zbra;2hS1j>9aa=73ES&r+d>sHh6(WFt6PyX@Cj+) zYlEEX!_vMCQ%N%ViogU=X3p3cDSsbvM{zM^#Vb;~Es2CvqkKPRWd{Uz5jvf(LtZNS7<05!nGdKTdCQKb>8`C& zBLzu0&&{x(GT(|SibZL%M&&Hur+Mu@BRSzgL*~ zH7fm)_g2{7)k-_3(R+JoV?+Rlzti?t5?LAdha>1DEkRI6QC1(=$uAl4TISLX*IRr- zOQg$=udgon_#I5@x^s2jy6=Y+;O}1Hl&DLP`J^NtPBm*;Ctq<%leoA36g<<`Tp!R4 z$dzJ#NycFXe*ibIrOCFi8<)E0;}0r&Bv)?}&U{gbwM@gfnzWAaJ7YU%Te#LN!6hU) z`r-1!SjT8oBlgD$agC-GaQ_@~jAiKY-vGKgwYV(lc z1lqX0e&pSqo|RzTs{!rw0l0%dY5GZl?g;ya>Zp6<02Js<6}Z1jIu4E_Bx>(!-ea9pb2zBm^Nr?1~ORxO>S%IQoYq<|* zyg#(8MVvKR2PuU4>U;kNGb7)}(xWh7J| znw&9#PGrK)nPb0e5mJI*C`i4FRR)r9%u`kL$J*i5_UH=BtJQSvSU^0FirT1Bg~K!% z#eVH8U8!hGw3nceFLyX_Wq<03AJ7Tgl=LE40t4MPGCA7UY$U^OmbIKrE_MC9lJ+}@ z%WuAcO-oEnTx83Q2lWtE!t&;~y-4)tQ5Hk-?>>@Su30`@c=dpwd@8iZkyhxnltC#N zAaQ5AXzOVI5n{8T-D@PU(b-Q5kx9yz80{(b0XLri*;{||;M#$WU6G*>v2>W|#;bKn zl;+2KBJVf)?Mz?5qS_wO?QAn0t@HO|7j^b0X>4jy)>NJ^S7~NVo<3`K{SoC5nYHjn zOr@EDleWlxO(`xn6Acg*#^f9GCLK%=8AZE17}%JA)unQA7TWeX*$?``+sK0;!WV16~G-2 zflO{JDUBqMaNE-yrRTw3lE)qApdLlv{)`U?#RQu+iqa#PKdOH13658WWFL}?HV1v; zdtmb`5aImQgpmpbBoh?Y^!tp|O7|6B)xx5gm^rtry$`WU?xy}MBq69r+^e&o?s|Jt z!-a^xHOk;-U0dgOkC5*!M-h9SVID?gjggZ{zAn@0y?-3YP5h)@62P^>GJ=h4aI4)T z=I=?*pPX_o5qjO6h<#z+=_T*|gumYt^3OO(A zYPpU_+J>!y8_94lLx=eci8=gvkd0SJ=fl~EPPdBC65b6ZSj5oPt^!m_moyFFll}eHu3kp=A1}K2VB-hse!s zdC~ppLj;;H`8&GOWxFa{ph#%Q!uC@*8vizC<<&;@`=@vHmi*XW7!V{h>A57@6M5sB zMF?z9z@6HAw4cY+5ANp(~P7+!rqpAOVrW{5P`rb1``JFuQ>bS5^x*|Rx-K|~xrVa~y4zMj zt+F(Q;ch>DvGLx!8HV(bTvPjc7rK(l&5+ zUb<`2-N|Q|=3tY9b_L14x_@9`NWHcF{jP{5@N5 zyMUcRq`dYtqSQYO9w;ZK-uKbOg^}&oNYx2e=qm5*LK^DL=iD;uvOkccYMLtVqu;C1 z9u17hnzynh{|C%KGrxd_i;b~4s!Bp@5nECTuQ>`qpts5=%-^gTK9wsV1afuP;)2ga|hOt5560s*ufpi90? zS+GW=5%@k$YX$nAf%*CKn|Mlz`Zx8KwYHfX;{!!PqpH_uZT|qw(Hww*u(}KcYAWzh za037Y08JgZh^vqb5UtLL?r|?%TJ;PV7|qFM=s-(&=D_3huyffvrwrByV_`W^O67+MH=sCTiezaF}?2Gl%-J{ zSKi1wM1i7&J3J?N1d=GhrBHrYn|qUi z44f$J9vu@;Ib>HN7ElO5s>NhnB{qPN+Y|tP9}Y+g4Mh$?9vu{%QemVGz|sdj^HXci z@TI~Qoz)y8BiRQKR9Yon0CXtEqkvivf%+iuBNo0g2E0R`jhzDbCk7gt0jFgtPlqUE zNyDsE(M~7#hAE^9?I0e9_3tPVifCSA<44no$OKg{?-Pe$Xn=C`;QZj|OM&_TeUlqt zmYLy_F{4=U4*SPXDJLE^I{CvyWz;2yz6te%9SG!Mdcohk4NM8t&*%5Y?=7SNoyWs> zJdwQMjtLIImtfWb5xQ;%ri_Y3`EifE763I{TbKfnfB@$3Xvj$VC}Ap!YKS4+>pOB_ z97C4<;**kjRgxb#w?JsBJNS6N9c6V0gy(M0-|qohkaZ4xex@Gbr-P@15}l_9KQHsv z0@w%xm4>h7hD;GDpp6S=jS~$hDcu6V4)5;YizFzo0NPsW7Hf=> zWChfp4?CT6lr5-Y3Gl!5&D`DBO^EvzP4$5k?>Vj%8yCl(@FZCqw?Hn}!feL%Jw*cn zyMX?(X)sq{088JI{JBX0TMf>v=hhoh83f!0{6E>pO0`zW`QpD7p-&oJmTsegNIx5z2OMD zi_$ZAM5RX|&)y@GZ-)^C+2au;)8hnbE9(Pcv9~NoXAd2jur7cC`HRm~G59reZsQ+X z*StZF^(6_&YkZRTn@LL4OZE7`Y)jNYec`Oo38|!S$2qKlV2>>c^HV4aBM~S4{{Y-V z!jL5F)7S9xy2kiK{I18oPK!GE3_bol|FD1Bt%|X>8OzB8Og+x&g~2L93?3OYpXiE{{XDp zRM9=8pw*q@Cur|SAMu=m6?Orj#n9ND%}_aZ>xd?xh@6chgd13cDh|u$s`KH_79r#a zG=&LxZ)ZJ%ARdTVaJo0)j>#yTrtInKTJY;8BB1&KXdC7i>nLp+baq?o1~6bW6oJ^q zC`>2_N)QAN2wgU(8ZL{4idnI1 zuFEgVxsarZi=^w6h#-2{q&~={u7`#`GS;}NbQby(jdzUFZ_{p% z{loJBmzRS55YDG5b?5K(gw&f&P>09< zWpEIOgGE6bRB>ql0YRp;YZT=wri!|9LpK6Sv35Omg+g{MMBIOP8y3PtLnHHWelooV z1)>#l5OYGFoku7-9oorhJ{+D0BNYeX8QUe< zo5DP)-yS2Djg=6W=4%2?O&wVI#E!?1(}@WI*cSf)oH*BMLlvPa4L|Ss!=OQJVOhT; zc$LB1Ny|C;{bM2oKq`H`VAjQTd*h#dV{vwuN&d*{V<^r5WSlpx_ve1`1&Y<(T#G0% zIhEJn^^qjbhfI_A`Y;QHZfn!}!(5Y7y>6J-_F>-I!)z}&pdf}{?3TCdHYb(Y9VXCk zaMa562*wNm?crrBdEQ-35+w_Pz3hZFzfKSkE@9O~j*d-aXhk&|+V&Gy$63_DZjw0l zKUq7IVv@Xzckle+Ezue&q&bhWrv^{`mP0S^vj3*#KM<+WS zVrPcH*@YFN@(IZdxicPqLTTUam&Jh8CmfddjT9HHOUK8YqG4mG&6AO@Kdf6Bvnw5( z5kV?#ImA%4jR+8-$7rL9whArzTe(R%LoG_A#w8)7VTR7|28*)q1%jOuZn{z^?BFy+ zBGPDzSs)Vt&^Yia1T=1N69|I=?L7c_biWr7p)!`HRIwc_ 
zqTs|}0RiQMOkq4;SoBqkwFda>j`eY*IaY#2yXgJ66?dSn`o=&!@#Bo~gC96C@pAHo zs%-#vDmYOou}Zld6Y`kF&}pK*uYf#MeTG$E$M(pjI?;hd~0Nc3{bkEz)Dq^HIJTyM5 z<0FS0Rp{54y^=e}Agz!i@F=g711zYcQvv5&9{45_{<^6hXbX8d-l}3yw=_cp8;B2| ztag?xQjXiuUR@J-LqO@TVxGZHdDl6m6f}iirnp`3gqUbwfo<|>DO;znvm}lj0oWu{ z>ifCI%Of6#O}5v3;*{BegzIPgxK$m=qyG8pyxT@Vd#{x3p0W@zq!l_s@x|k26*9oE z6Ug(gh3}k8N+F1szo_pSiZxE2KAM?u5(R-~{{XpxNa|^@b?f7-Iv;Z3I3khlh06z( z?-9XYE-mX7UF5)1DB;4mA7LCs7 zIv!71c9b1n=D!&-qp$|FBOYEqIU+W456Ra@sRQ7lta9$VyIri43>-gl(T_HOf?^}m&&ajl~fhLk| z)Nfx-0?;-9Is=-l=5Ht?-a+OI%RYHNc+^wS*2^@WE=2K@aRfvwJp{d3JbO52ZbnTPdg)fI=qec9B$+V)U5c7O`#a^a={DX7r z7--1q)qHpexI9i8WP%rvyWpoc6+Px!f?e8?(|0fcFlZve9TB}Y_*`nFbWzsJ=Pjv> zF?>f7Q|wAO=wH(xm5$L_+8W!|SRa@$xo0S4XXOB5v=%rKNh8L>;_;Fqb)A!TnQVvY zI=r11NUogW8ED-C4eX18dcj`H_yx4$xW0APHBGb{gc>Qa)s2H6Hm$T3sfd?Adog&y zKJh_pdmpPhcpW zdYs>k9P2LPRlosl;8=Sg$`C`SF}AtY`N?AdYrl}uEi|*^FN&`I!uagg{z;WHZ3S!} z6yesfBJ;WgX-wyQc*HVvpgMZvUy~Xt#Sr4NR;Bx5VhWa7Ac}2K2g#0fP)6J|^X|`k zz?u#NmbP*@DLFFT!cjLwe9{`d9OM(YK!U@a5z~6(A1f+D!EHSK@M1y&)Jg;!{!;@? z*jLSWRM2RCv1kn?@ZYtcP5H%I0cg!6OcdKao)gFquJ?>0LR9eN_2=UXo$lSHpN4PA zk%Ll((lYLR^_rEXS+%x(F*llY@r?ek&ShK$#F!WuQpCWez(c9CJ!Qwu7SJA!x7PKJ zVYij6o}>Qq@$mz0VZnc_YDl9KUjDu^*Co>cC!X?dS%T7!2eN*!GzfrzhR6EBQU}s( z)2F?D^PWZpHVxC$7vmNzfl?j`;ofZoP9g}K9?N>e;AsFO%;%m%>l{QOyf$jO7ZST+ z04#dvAe1a4I(3rTxD}4MoC!Nz5Lw!VWzbI(gTHywI9@AaaGY~)@kj;+(E1){Kl_z( zi0psH1-b~*yq}yBf=DF`x#makU=zeZrw8Pp_OUv`?S!j;uwfvaIOljEAV`6vfAZv( zNkgZr=d57@)aRepvBPjaXumz+sFs(Z5${}a#t9k|YH`2bzq}(#X#q!`IzKr-M|Jb& z-~Gt7P}GZT`s!Ud%nC8>bwT4^rZ`x7cSBw}H_u#P2f*Y6Cl|K22&D5>SHF_O0B(+5 z5-r#?HO13$g6l57Y0;r?T&I|!1Ir0{N}ihh%Ruhb2*^BuDeE7zxXPzWaKGLlF%c-l zey=R=2O&cSzYa=2jL?a-L52Cim&!V4J-$y^mZ752)vvXX{o!G+J7Vd!?cSd_dK$2J zE(Anm<+Mz3Txn531w=Te^^=MP3b{yUBv2l^NuVbk<0b@Q3q(T3lT%$~3|R28lPZRQ_r@7u zRH#u9HUoB{30h%1fd(oC+B_eZ0mZb-EaD*L+5t?9kvxkRD$3Q(A=fv==C< zs8#wu#wy*jx&z-D{{R?I89}&9rJ^Ep%4-@dC7LY?4d;&WN~tJXd(nV|&;SHIVzhQh zuC*O^z8@wW;7y67Kh}5GY1*xPbARe&3Da;gY#oeSF)5Upz6PxmOUKqF4~X9fg(Oq( z<-qS)I$m|_vz#r(;#_to$62l$2F8Yudf(r8&$O&pmuJ|YSg)$-#xT?C+Xlw4oIpXn zBgOv!>Ip^`k?^Nwx7H;a3Z}!SeQ()%$eo5NQXEA%hgxw#e5wI9fJTW%y7iA@CE-tp zn1!dTvwcvZt#VH0x$~^x1P5iLRqMy3&Q9(@xZu0c;jFohy6*isNTAqt!G1*J=N^Ll z9zWC1oU(ubhbD`?-`01ERt0hwUUl=A5rle$<6Bd%u+kGRKzw?BVtpr3HKkJAT-K9*+yx!^ZU*6 zhW!!J2j>FDToo&7dFP*5(J~NK67(LuV}=#2$EF5qCmnZjk3v>ju@-s!8p6nl5D$=Q zqmJ-(jFW(9TO+>6^VH?k>9s_8LVO$K$Or`Grrb86c-w%vqmWIGsdz`cAQ23BgQ;gf z)G_3@K-?19<^C>e2B;{Y5%zDdIb`TIz`pZNp+Zh{*I6(nz`!&=NPS}yZ76|)9j1@Y zAz;{_|qPVMwQ|@snzTfQ4RmpFHD* zvj?>ou9Xk*o0_KBG>?B@_OQFd$QlPn?LA_$i6fD{e~*6|MM7$XolZlBT|7)~hoK8& zhN1M$;{q0dfVxeafgr zTVAJFf`&v!yL1`{gA_~f!VNDf9yN(fKnrOA9PIP+hBOc?EkHKi_pDCJmj?0An49x) zK$1fQFF_8xZwPOt7Ct|H^@tLv2r&D0N}kES@k?>+3#SX-yll!9WYK;~^RcdB;}i{Q zPK5=fwM|=t8>n|zib~17=J2rm2O6z7@68R|NOB3tJ~`{{$8%{yRIZ4y$Z&O#(!pS$ z424vc1-Y`u>osb)J2pDk1|f@W4f!f+KHECWrZlUDYTgGGN0G=VRsI@8tt+>~K3N_zJ1Yu+eI^1wVH-QUJ+zy(lLv>Fg9^)4e3wp&e+ zo4cCD9^Q80j3OdEWakt^_k_VmC=@^v_s%_$5&$765c2Np7Z{*Ii^z1% z(o9FdbWkcHUBKk{!IE?~QFp963!%uni5FerKBN_@4tgTjThzfwjbpa)(WMKaT{^{E zU}Q{UdE|fNEr}4@S7DWxePZjA1Aj!{)pZ!^C6$^}?`c{lJpbCPQVXpLYc zJTF~m8ED!?Hdb4CHLOAkOoR^>k0sYu(+ZNZ1P@Ad{{XC%9WqU7uUw2kG-!w$Tj~=c z(N2*qtK(|?+=QV-J&3Q+<&!|9D|AmF2mb&wR>8Bn2ITts!T_bn5D1saIrEw%NK8Ek z$*yup98M#{-+X!d%Y-twq|ho!*0}eYp;ZLY1Uu5t{^Y{xE2^U3UMqq? z00N>kaUKKn;|zoli%Y`)0OzbB%si&`rT+l6g#{fH2V6t@#2o})fN4g~vUXO3V9+}x zE9>!R&LB83c()84xx)tLG=Dkzn1}~Qpo)H9oMQq;iZA#ffIP{e88ogLduKp5v7w!! 
zD*ZPlsFY-WpaTNuq5d;`8+|enkpY8T#G@BT+f4di-Mf1ziFy`IGMso|`Q}q&w_y z)<=V}vY5BIa5tBhc;mcArnj(-{R=p6 z7|2@;NQVZQu>(w802!-b1t+Fyrd`~2=nQu07oE_)vLR5xQ8ambaR?|t4O$-u8aZ5I z1wm7dapprRG30OwvHD~6hP5GGLqz%BY_>5n{{XQ4kBoMLd#p0j+6Skue{_A$*(U!XBzmegxt<{{Y-Znguku;`iUi2IiNI3-Y*vgis3! zI5}T_FomEOOarZYuCf4~9z^oJzA+|H}B?E?(Q(?fjfBnFan{2bX_wMfm z3D`lpJA7P4$?XYWnGPOsq;~Q9gx9gJyhKxUcY4Xp&l(X>?(y}Oj7VyP2%1f_)9VC4 zgWhaMoj-Vt6`L)4_uGEC$zc|Wr2P26sQ|h&YEK`lkH`Z^C+Gz2);kb%aBT{8uLH9P zjR*-mr;$S4aqVOsW)rAud(-XCJP;&>2S4f0edKe}LqK!a#tVI-Tyqup{W9`+lVRb| zd*Gbo;{Fgi4xRy9+E+uKbHNQE)D^6##lRw+*K>HjQda?GD2DP=2vuBvA*3bCW0Va?Ig^ zz4YhrXAg!s?U|Bp4S+xN{2v*-L@JOAHYdq9doW56THsTThtKzdlsPufMz8M+U>r8x zc*Un`S*$|E4qjWw+zgbUh#@=!!>xVcl~5rkPJ^$>-|}TJR(5XZLrG(OAW4!00{9LgTOx zu;=I3I>|733A@v~@%if*T}2QZ-=7W>z|vSk1^nNvr}ANTn$LglF4Bvs4Mp72JHs^k z3O-!uG=BZy6&E<5*Ujo`%t&59kN^wMk^EyFt$|R{K>18~wRX1z57x4@j6D)N=tbb> zZ!Q6KO?pet((uz5R$Czrt~S%hS!lQ*X`=b(jpOht4QqtyvElu=8^RbJoNbGA_0AJu zPzt5tKznb z%W^EMp{5lqi{;;JO%O`9%T6iFzpUAaI7Eydhu~pq6+-TmHmS#^So0AhVN1%etixSE z5bdsVKmmX%4HIr|dH~i8H?RIVuMJJrUA}I<7Z`&>fam8VXvj6}jx|TT4WKb4Q$uQ# zZ;x{vFBOduxLX*fM)qKV#zCayt9aoV4~z={Kr~J&yIC7(HqEb@mrR<1iM^g)KHS)3 z5cC@R{_zoIl$GF}vh(xS5&?tA*DLUT z(6YTwAvXFN}(s$?YyN+RoC(<4er+-W+ zXuu}xqsN!5EhP4(Q>}A{XGQ109Zlby0Gn*6jjEKUpTC^eMm29B-E+^68A_5_i$!P% zzPr`OHeN(07Tz`!hBm?%v3FlDI88~k3IUBid09MT(tsju(b04sF}Vtr^Cl)HVs0qk zpw~NW;~7O6M_v_K&p)j70-*-PgHY;!SP*sh1@j)8srKi&r8rvL-5 z(c_;cGDAUzi#zix`tKjGg#o@g`~I6p& zqg%nPafv^Uu;uu`sAVlYIs1-Q{v04Qs83;ns1_+n<9A>Ale9Lc0ki##8=&cTJN2yB zN-!vI&;6`Du{q(F!T$hnHmIS4S$)sEmZq3BcfrrRV^fGz_($BiZVz;kut!5%$#ET3 z03oBYzQ+R7C(6kZcGWhTKj#M0?}F+s4;SZ+^^lmc*;gEQ{{XpGfqVg`+e&@Kz=5d} zi&7GDjYfT8W1$lzsQ7>GY^@><&TF3V8dV(2OBu; zNq}%eM~*+dK>C_HBuz0nniJ<%&J>o^NAQI^CyWH`2gk5L=?t0_h`@o){0A0`a z^XDWHMr@kC{&~v!snG9Fc)L<6P3o^-{c}LVzDSmTf1G#rNLsWTc|g;^V;1DVrSYTG70Ta%F)m{FsYe6t^Ats(5 z6E=s?Ci6`qmT2O zrpE_@IBV^ST~DZVZy<)|DC?$;@2l1kr!vx=QL*^Qa)K>+8#pfn`26Gqa(qDb{xKjJ(HjqVtOH&h-)>Oo1zF1F00YXL zTxb!z-K;;L&aKxNrH0w~&Oj-3=DByNr%y|RS_`afK7IcHjAXn75#s5=2nBU~9&khi z)vo&SfTN4YpIIAKr=aoU5mB!Pj~IB36)u(k0KZt$>7frG3E;2J4M7OniPZl9Gc*|*AxERE02v{BARxZa*^2l9 z)WXyZ1t)yoM!91e1Yya47hfK6fzTWv1D+ysZ;mUPG*A?|O|7@yIb1FU*#4vOFfPZs zk@=I#!~JIhbQKyS!RqaHy=3Z8Rqr$<^Rk-f5myQ=ws?oIE?g5H8KvopHR}N;l>B#+?8+Li6j^ z1*E3PYIoC&5@@nl^~e5U5#z_VYySY;xS1Tf>ovgMF(pC5o4!F~yqjBg(eia~Zt-4= zAr@+Kx#wsIL2Kob~CR6Md(nAjX>6@@y2(j;uGj*V7a6O&P-~M3C3bKz}bRRg_D05l? 
zr?N~@EMRq@=c6oC3QJ=(UmRs`qV@n!z_^wrNhmJ(?&Ie|wVrRouhswn*+XIP4eH!l zclU%@(6g*Gg4z>l1KAH-!6}#?O-J6Bw;_}bD-DPpA5#oSltSnzcgnhQ=kGaA2=XDn zj~NbwrCX%$t+JJ^!cnJco=pw;&Qr=MQVWwxajXv#rtByU*oNMFm@1yQ>yaE?U)DEz z1dtA?_&3G}(4bR7wL4YkI)cE}-TimgZ}iaY>t|O+E~l^vx?Ok=#v+s*gAO|5&M-A% zpdS8D>2p^Jf^gJ%@#V!K1+ig$WD>;^6+7FH)WLG8qk!Jta8#&;@0G(9YUwz<9kvY3-&Fqp0P_}up@u4+Zn23xxZV3^D>d+8t^7j%^6Lg+Zom7E z>mjF?{{V29wKsdy{#s9b3Hx z;{7;EsFI#A%ePJc0Nt;~MJRK5_{*sXhlStg^Nvc=*FnBjP@C%-(XmH_@A_P8F6r%E zf8Oz3C{Y@gJ`(Hw z-=|oJs$oeSFQ>jWaio_joBse--@Hds2wa7R{Xef*3Xtil8-n+XOH#|A1?c7H859>> zf71#zA|_+g{h4Ro5=|RpNTa z=Ib0yA?`+t;NQoLcz}QaEh>3O;}6S(fb@`Jv1CnsG^PO}{z|=nh7dz#%0`6Z@|Z|} zsN*^$cJqi4h$<<@lN2+(oB{-`>HEk*wgnDXQ<2{NVlC56HoSc60tGiEa$C&^cz^4k3!WTX%6Jq+tCeKTa)a2l~n_G#z29oF4OtedjCa$qn>eU>8?&1qGku z%Y-NZLAQVBA+^z|f5m~N*-!(N_ht_$1mw0gPY*vh1Og&1!g)h; z^^i220Cz80a?&hrd*b%tbc{^^QR~KEU;?!s8P=w2^2tI0*7#4Zv86PS(5~$(v%!K8 z!<8p5nvSe*1h6rbZ8bgQ|ZJoDyD5qqO9i#<&+VEEt;SASS#!Sm^aJqHR~3G z!5Zn{-oA5_Nj8?gp@h7GApZc5tl0qmLi-Ps04udQIdwVt#mNCeEmN~k9&pfkZpY{Q z$9-b@hxy7LZMEkr?L^cRXW@(}6MGLixsef_nEJpMwRBnLXyblxfy!Y;u{@J!Iz3hWRGPA38 z(Zfz^o52uh+kQQ`#b^)>g_<~@yMlyl0C~6m`pp6rZt6~tANM$gsHBTz@b!QSplKgp z_s&_d9ZmxK{{Rj=$8rs8zl;piUDXXU#j}?A#*FbLcPMIu5T?FE1Q~#NArGCNTD@=( z*;e$VN^yReVcrq6Qp0r}ZsVi)s?~hl*?)KovvZl2e6fc6I`fv*!;hC1n(fDL#tMn$ z{N(^@8**KO)!7EC(8rFpn*hiljB8&p4 zDt$No^@m@A%2V2UXVxq=2CDfzWcP$yVLaeXPGO|=))~W0Dk37B>F%ZH9TaL0a96l+ zu7=^f%bv>)3k^I?E-HX@UqTwUYdJf{fb)e9H^^!}H1&jaKyG4;){{`1d}VNG)U8VL zH;NDltHF2|RPU@xl~M$*p-(3_HVI0hmL|#5*PKLjQg--${{T4xL|fC_zJ9vF9iSL> zt-dw)_Rvi{lw|(^zOV*@kVQ_gBmk8|d9Z~?yjjanX-W5pW%a1iXGnd znj}FVImsGw@}3MsRVUD{dyM^K{JfZN>5uVY-uZBTOi{r^mE|%T;!2@m&u;p_1R@bw zJarA$cZNGZh5?LF_`*{|luiBhn}q_p3pwUs65d@A%n2?X2B#pj(BRs@CQd&Uc{gbf`hPhU@U8p75F!5ncQbZwM!M$yvR0HSc>pKm{bUHF2!=4)DLPIJ zP$LwArwDe|67T?^F9*&=(YhTF6XekIv(8Bhm;j5Fon>1CB~)lJ@_cKiXH>CavJp^i zz4^sJ_R`V{18FtGA9$-t5Yan0(Rpin#R5~27WYGY;|pAf9Juw+`QGoGYeuwqLhXBS z_A17t;+^^L6qj8jCmidX)&mh}8tB@0;(EBY0E7flJG8e5L7_Ci@vrM0>aDJh$I1Ty zbB7r%C^u_JiC}lWaLR7Py{a|0f8)kXp|w)CDg-%7xyQfaX)^2QCDDtlnV4H{bi#Op$cmJ9R_wn za)6sokNd`L2A9*DqJLOu1Du>q6~&gRR6WD|VgxOS5OAir>GEPin70D*8P4~S!K)$^ zj`;1+doDW=U4rXCb&8rmSTkE4{{XD3!BGI{?&)-Txb}jRv#tw%Ul_6Jt6Aqn=NrIS ziK{z(;s^mLf%rSt-xyq}Yy?6m_}Mv_LKxdZyMCNv&j>;~2VdSMqje?FCqoZE4r*4& z3^@@{tTnw|?b(0?Sgrzc5Z5i`3_xeP5S}#e-fkC&1>;g%#=br?O=24rCo`v88RX{y zK|owXM0B$AIffl^S$I-+!Sj*3ly^ID#X#Tx0CRIN9izs!JYC?_EjuHvXyeWis_j*(2Ta&`SG3TTy#Zi*~h~B#~cqv=M|7%?2P#T0JWXL4rYV9@PFnTu;9|Fy!1(clm?>$Q+}%qi-D2| z4Yb5d#u)WC7%~CyDtLLp)?Ebz_G)~%VE{@aVtFy$DA5OO>lY)Zjsw5(&N&ij^j8zQ z8DIdkQob!bcaSJlxun|7;*d2_W$~``4sXJptMuQGIifH|lbP{@*fgT#>s|8e5UJmQ z2H$QeauDb*g~y#=-Zn&K$kGpITJyY$ISmx};05w%)L9YzS~xi&BAj)#2I+(v)joK(3AtqX47m8-=dgbv51!s0i@U6X1sT zyu47LJbCo@g5$qaU^NjRUOzc~q0DuCYrXF1>k{rtAevbUANLs^>;$^mwO)N&$%>mW zYNm=pd%?o!g8Y>IPQ2nmcvA<^0KD||jT%9=gOPe311@EtA2v8Y?#yvf!frftTj57| zP5?pJ`61@$FO4$|O9&womZEIn`w?*+d&swhGmy_dvti=rd+RMXh_%qJ{24d#gqiQG zF=+9JA1*s{)7E>-3~49MTcgIYA!>{-PsUG3Pd6b1n)vsU zRw@#ez_#hCagos)PO?%(#)-lu~N9p9BSfti? 
z0Q0b>xcs?Zpz;ddO{4np-XPgah(1oV1@9aO0S|ni;~e_J+6NYRAC5JMJHpeeb#=W) ze39N;RhhL)&I~wxQyQX5=tpD4#88wFV(14-tF(P)_ZT9khrXQ63hW|hWRNICiydB* zNqEIJ%xqp>QPlW4!g64eAd_ylT^I$tMv!~#elorfc_3(3wX#o+G9$%QG-{ri#6Eh> z(I(0Rjx4%Qybcsf;G68cFaF&%NPc)546P^-Nh`uD~wDwzj3iW~o;3-_ z9pbu>sk-tOd;GZ|H%ld4gc~?Oav(KN#@fs9(5);ZeU=SpK>zkEn zp)5Ms;w4F_h&FbA+~ZH%hQ-=m@>*S3Cv_f&thR}R2bH18iLktC1|UmOoi%$Fl-6OE zW=c95FJ`-b{{R>r;EL}4`@q#XNNwvGAqA;_`EclXb(j8s#~DDk2ucRqo-$KWtTpA) z39VO;E^&)WqU*2wM?7W7*FYw>%={mWvN$w={{H}2qU37V9;S`F{ACUuh&n&-Il@R; z(eD6n8%GEK0CBZOQfSwIl={P3C0D-x0IZe-Gf;v!-(;%i$SH?sntGzO-N*=y@4)WjVeli|skMKpk|&Nb7k#wY{>S7vQX>yrVB zf!bk>da#$HEG%ZR3NB^*hFkPM&Q!aA2xlW43MQ1Sk;>P9r9` z(_98N8XDJ>A;j~~ZVk&Gz6X^%>f!fhwo^##O^&(W#wALnfCRf%~F;*4(%oAz~qU=KC&Nu+PLOzRHC zbtb(1yVp9x1u!7q=N$||6{zok4PEWFeC)}9Nnr;!YVJy85~E}p9y#xf)F)tNqHukF z^2h{Yr2Cv@Wv8{u_5L!$WC9vBUk^WwL=4a_p;v^s@c0a~1bOW~vguITB}@UvA!SX^ zSUuXTdB@f47lFmVL1xv~2uaB;K=eu)2u<09fDZ98ENYPD2zd3ZVYaqk%eh0r0RncvrW8WQ*zU(PCoHfvX3oCp*Jg07##{{T3JHQWVL z=U(vBpw$iIjhcVPF@h>;08zr~ZP4p(Et}GY+lARtvmek7;t2-Eu+c=-PH~5!8Uz~# z#7phK2R7;6Ki~J9v&we-I1(2qA~Qj2zfUIy2xM@@Cx_pMyrK)OJ08Xp0D|S}-kx|l z`r`y9iQKE+^2uwrXpz3T{{Yt|D5awEd=U565_CZF2+i+}^XJAgtqV^y2bYbz$QA&z ze~vByA5bf~%$NF&FbCKjByc^s*9yC3(wm4>TzzF{6&lL|vM zA-%w{JGFSoW`RRxI2G#OfyPr=xk%Cuhn!@jDodn%o^08iM#!&ez;*b-qB2>21upLx)TyP7+X@JDES)(>S&)<#jBGBwY0oR;$tm%Il;}aiQH8NB?{_=$-&{hS12L7_}RF}E0 z?-rtVfT~z$r)&Kx;(eB5%-;WoX8f*Vem-fZ}6K<}TP{barf z6X$jHx9<;J8>Uuk(bWxEw#@zP>Rbs2@{! zyVqau6gIIAos!vfeV+2`*06^wrWJ)@@i(*FS3#6{0UX}=m@D0sj)4uqUXOYft`vZGXD zgc|A{N}%cn91z_WH9&yU$2cxiMR8h78*h;?1wkY|Hg1&f`p!$V(}~(6`+s>mgp^Dp zL2u2%KRY6GU3)(!^x}y`ci-z5dhxAbC}u2I)O%<3g!qfl`|qB;V?Y&o@z>{*7y}?O z@H=P0@$a18fzq|qnh0SnDT@~=5TENg4H#F-~z@oY{N(@kLj|UEZjE2Il%f4MC+G~FC$M|R9i>=tM(hmFxcsyx5 z9(8u{M>SBfe?M4TIt?YYZ8T* zmC0M_=U5db(rt)7zwcOgh^lf&UNWKwVy?$W&LEbUkSC{medLmelbTLHKlz4IMw+${ zkN(V{Y)(W&T07^Htag#^q=T&l{;`#V5)3c=VsAEUZxf7rdh#)!^MsY80a3iy=kHFFBfZ5(|`DDNE5qjXQ@*eSg zj=|+~sqE##*Wnh%7W)4H59VFa(bI$&9S4+M5B~TZR)A)|vK$wF>Bc3ItOJBYM!Llq zkdO$DM{u(7awzNw6`SBfAAYbF(h5nj%D6Abct|#K;lGD-TwxmlsBs&TRM00H8^nffj3mf-Lf4h4tzf4Z2ow=DD=E(y zDo8QXo5x85eIKmkM=9lDg>B#o1I)>qyUy_a=P!YvSb4@cpc^=ddnG^T7Po05vnYJT zv%!gCDCinAn|pBagb^g!v^;Pv`OPj41&9`W+%{nC#HHWcC*jUXbF6e?evC^LgM@O- zB2Dw!_XqYlF(^HjcAXv1Mh>1(cc34b4oHF&TtDyq$LK#(=i*?|O4oJhy*uQb;%HMr zviuYGjY(Bh;%(7x18D?8aIgtAKP1-JJWplYHz!rg)ipKq3eDJRvM+d}xI`|GU%%cM zkpVlu^vz&{I8F1eH{Lf2=O9R8C8`>ku@~ z8tR~>J%t-ei z{o@3A7L?hLFuoH3Z|e#~HQ|)_#sgt;-UNVnaz@isId${v33O8pqcjd2UVqj& z{(pl^?6rnSvSBC)t@ubBca3^Z@CySPQA(apjnx&?xKOsXrU?CigvesTscMO&QC{{VgBQ9y~? 
z_2&M(Vu?pNMBj%`KL=MOUDK(;Lu=ama3pp0BTDmn{{R?(OmsC=Uyixn4#KAi@Spaw ziLz!qgib_w#T1Rjgul1_w( zybv59co=-$zgUi43>A3SpI-8se*goMZuPBVqq!n1{<+B5-o@V|`62U_fK@bDN&VuP zGu2L$oj06kp*9H>ASK4hk#?&)Tw&4z zY2xT*XBssyqSGX%#x3P99&ll?L9?%M-+nGZ8z`zJJ&A9Q@zJWS75hvNr6Lg$717`h z%XI9Tx^9$-v&OtSaAnpF;G@RzW22Z>*2MyyMvDf3det1lu*}muL4ig{k4uZrL8L-= zHP@FA9yg#g)2f;*aMB9W2Lu(}WZNGbWcKtcBZBW-4gl2zpmI%DzC<5YS zwWQgNR=_;&*Ysl5*fc~~JQ953L;A_E$5n7}fA<4?#}d%wx@vR4X>c{^&7#m929Np3 z;o*hw>;BeR1qnpgJj?$8OaU9%ZoP>;{NSo?j`VzT{{Y|6)mHS^8dvYnc?C)#t@E>; z_5T1EY4zns){N$_t`7s?%1{Jx^Y@WgXocq{--*Zh#_)nIVZZpx#xu1G@A2a|4T%Sr zuj?!ZTGU}lIo#(qZW@}j+8!&sM9R_%n>H_B=NG)ZwEUg*tTsBy#MOa-n3jOL`osiK4Fif4P-?vUz(hd~2R7Wo_Fxj7t) z=r2@@mb)T7I=NCkBWXa?c!wJKauHHe933e`lf~B=#Cu4Q?48HfFLP)EUl_T~nrXZ9 z=HeAKc82vHvgJjpgpUUO?_W616c{DzT_WG>5y}=FI!n1$YW0G&S7<2c9BXwt)*b@e z;8nr`-PAb1*BfX?+nvqdgT@9r6;2wDMnMRJc`swK{;=gzT_i)_t$%qy5Mqcg$VZ#6 zj8G6?5e0ZxRo@+b&QZS9pmEpWgdFw5Yv>mQ$dC9y59QU zSQP6)$LCVd{@@afprF3%ug8wCFB%32Q@;f`$)i9O!44eQT4%bn3S1kU(p>l;|Gi%9czU1PaG?B8dd2EKl<6$d5Z>Bc(T zvF+jYIQNI5l)!P})Rp?fbP?hAije5pN+@Y|4vYjC6q{%Wbnc;WOeu&O7bi-pYFo|` z0`@wUIDe0L>X}EKoxU~X$5w+X1vn8WCq3anm3g}QUOw@VcA_AlJLHM;Jm>(134AB+ z2pAB=0~HG)PnygK2w;$li4t%Dz-wNcd!uDtf%xfV4vEl;==lgJ=O_qer~_&oYY=7< zN_dD-0P&}JRK3>dMZVI**^fv>UP!6t`1boD!($IavjKq&(&Q1<>l3}o5gSJ;lHT>WXG+=L<=Z(6Fa z-VF)EybatD4seJ9G4N160e-N=j2h821BZM2%I44z0;iYF<24?)V2v~h5}7t; z&_$zDMX0H?zIwqZX07BjeUJPt^Kq4OA2cP?Sv(kdDK?jqQeN1Z!kP`H=dne8BEOc;M z1nJ{vUnUqJ5F2n8e3OH$TgJXHD#rZeDlbjblXyY(h$$j5@nDSDEhS< z^P5_NDzw4$TnN~L15cFK{l-+3)Z4C_&2sNppv6aKufq7aUT~|(H(u;*?^s4L0a`V? z>D#_zj&HSZ&l=`$CgwP&6;1w`r+5mlqzIo1CV5KIYXxQ)3 zl%AXiLWq11-XwVdj+w50Fc|_(`n&%C9~S@+tby2f2T!b_5K<>vd3o`yCZw7}zbfQq zG#df*A9Er>1nlJTzn-y(0V~kkc}Qz4F?mHz8t?YR4`R}j@qUk8>{Je%1{v?1KdU8K)w-u513UotG--lQ#2r7yu z$0tVmKJx2ksuw}}my?XJtu!E7Ktzwxme2(uLT%?bbZ$-6CeUJ zy-4uekUgANBg8xZ0L&TvZ<6xs(PCm1Ya2U@O z%H3m9VTvGv1DAE<4Oo67W0OGhwtZr-sH`)B9H%F{$9T&ayMZTqE~Jp>mle0OkW)it zj;72Ih1!I2o8KPvy!gE#@_?hhZp8e3&;zM~Hh}p9gU8ses>dst%-zjetYqVl_mlx0 zotTt-;w+2{V2VFqj4GCkq~>5ogo^LW^Mn%Q5~($=rr(ziTSVc}(cNd(ZJ;~>x&Hu5 z&KTe{CW*~0^Dz*<1sakY^Z4D?1y0~9T<~u7-wq%U#TrTzeeD4Eyj+TI0&d<{XZ>;! 
z1ptGdmoy9rp-nqLyxo= zjSnlafM7aMgb7^WaD)LswWHziV+2Hq?WP_l{$q3@32^8eb?02%x(Y@vf-1EQbFO&A zB_+`9c}6o@XX6pLq+`9*;t;zIy~DD8R8Z`6bFJ#f>Wo_D{7>E^1geHx?t#Bpj@Sy5faM}(UljHaCB0*v6nP6D zJn)_{e=tNqH@jy&f9?**cJ=cA03BlhX)@*n9$%3!oQQByRk1G@h8~<{E0I9>tL57e zigC39PmQ9yH&4?*{BeITFr%FZF_$MpLt+ zeIK9mnqy8$2sFd{^^F*i4G3P(&HxcqQ&r<$Hczf|lxtgU;O18wDcTI@wI<*5i|Ues zL!JCO{`*xVY^XdkbpHU|;DBkqC(B#kf@$*LM)0WY(t4DB`o&T6V zl6&X(nsMNUJ4w&%&ETa*rQYX$y2a9{4kvXt+g}(B2IPu06{VZx=KxhH$>LVg6!4+m z2vj@GA_{TrzFg&*rIE4bE}A9ZuCVY8MHLsYEHD?7Aq^q?zB1GmN)v~>fnTo|K5!~gsn$CsSOQGj|S%O%z^pXRJzVF2P!rCIU$$pxe6`s1+Ncw%S1rSSK6EY1lR;hmliy?|U+L z(0j^A5^_c3MzHB9MLU7$$Ti{W+yPBSsAz-98&|_UV)Y0iWTy+!ePhHpfc?BW>tEEw zinIizXv0g*53APk7`1~muJ6nL09*(G9i$>`);rQOdY}2u2b`2awfsABfFL%#f9?n) zPQ2kcNR_DFI_oTL@YSCu{l-ysjjBn=HC+)lijUnCY@k}0HM1n z{{TAP{KnD=oXbySsHQBPfc;YloB6nNv99CeC-0N6mgKJb$Q zkX#&mfX7e+T(JOnHp|0!$EtL0G%oIq7{DnL2USEsE$4XTscu@;^meJO)?rJMWI#{> zNS1lmIjtzVMQe0a;X3?f;3#d{dDhN_;(g@;QVMw!cFw=2!NK2Av+l!>wl_-@60f$E=f$25W&{kk$gItDx@{3UAkpKoS-+ z0Fc<}@ZwTv$~jFncz@nWoTAJTbrfx>=W+MfJz|DzNDx+~+V#IIby1NV=>nl_UU$BP z4{z{e1#51Dmv~!&2%zx`y_-!{Ul?H5Hy5rPxk3sSeT@fxav)F$A!jYvZ11cm=z;5Yz79X~si`%RlLhfGMCOxyCP9tVAI-yPM_S}f2p|g$O{@_Uhbff+B{dvS{ zpb?5cnWo{80PSeqt$yo~1cHkH0FHmWsV0>WkE^?);R`^gklxv?%JY;;i^<8@3;zIX z$&}C)H>1V!&LfKGqWS*-x0)~|YY;Smf7cBZ!GUJr*!X#W7{Q=gfCQ~laQ^TRZ3ww| zK>0b%K%CeEr);z5C^%iTr#EjosDP1s{8IS4F>>z-L^La}TE$l*1XGenltJj3xuB3MS8$@8cLgAV;?9tb6CqOjz&)dgT|7 zTE=%+7K9@mbCJkFp$#Ip?D2}>QVbCn{{VN45U;?;$&}nuqU>-`b#ieq(?q=H`I*)M zkUMqc>2bZX-<$+ceLQ6w1E?9#tV9DbHn^2AWDk?QR>vPQHK<2STgR_E%NRM@-3N)N zarAeWJ{FuLtxf&+i?!HLlE^j$JRO+|(r%DumAm+s`p0ZTe!v^n{rbfOG8$G%IM(v{ zq!Onx==n5jSSKr}4TzqAqDSuXa4|u!T?4KjrGq1=f3fTXn-R0=^byAtUX3aUDQX19NssHgn`h( zcdI!OoaVXcrBd21guQv|c(Z#GVXo)bU!2z^R0?s&{o@r3=1^T%=Ued{s0F4WgUE8! zT2B~vxSyec7!Sx?W-NnHly57TcQ>x@7`3;={VaL&8&&J5KC*6MDrWb?1v$0>wR zZs(4evx8k==%}0|Xghg%a(E-G-xXJB;M zPW~~7hwY*aZ!r z=Mulv8vp@#?dL3|AVA%ZivIv+6Ho*~{))$bvH*aCr&`yZcmDt=g7`pc1n8dhjFFkqx0J(2=6*mn$8+~gaoMF@At$_1R*gh1Xiyod?m@_)3ZT)W&Qff3Q0w@F+_+Z#5kQNEiTL`Y#3?KyccV@ZxuGJf~z1ou6gw5 z6rTu5r-{GW=5uiP0a_mP)|kwl?l{&bF3lWf6{{Xld*QIJ(y3p~KQ>je}T9lUE_-Vni6v3D34>A7bU zF1&I96klV-HH5WjNnlYrIB)UJQBs4529UrDJ-3`plMS_$3ZCw|V*qXtMb-AVK1J`SI%jY#LtoxBADy zM~3`$ikn{n);q5bu@IUGTC;Qe3&Q~&^%Th%?Y2e``%g$8vyP2(#P zfDLl3uG!-YiU96j0QA3{76!U4uZcJedTlGh90s2_q!Xbc*-+R1{AEEmqOaTjd}25x zG@eff`tQ6*C_18%9&59rd}9r|6F{{OX!#}yQ6IEVD4RfNvuh)YtHJT|3a6jTZIi%H!SOot7 zTkqm`mS}HorO+OqU1GHYM!f5WZ@*f{J5+42`)ir_`^im`Qb$gq?+rb)2%tkxKiP_e z3IGT3ep{SKBd9WM-FEl*z-TvS(lQk%jN1jbR4-c4^WfiD^c@7+wrxI7f!46wD|=rE z!F_Z2%0VhB4z~UrzQY9r7iCK!c#7WK`ogBN?hSNCpI7tFF0#FqKN{Wt09OdAp$OU@ zxSy_ZRZ%)PuN{Be%`hMt7;(FI#)pi)`qPZXTTqGK?|FG^CdjHA9j@SJu0a5hY|PxD z%8&%c8uBG8C{~;uFIaFDb-So)qjlTXoIq;|v{l&@PN6(X;(!wlC&Eo}UB?n2jhmUU zQ?)}z44zZ~DCQh6eBcysfjXT+^V^RE>>b&rnm@c@%<@qDuXq|`;GpxGDuNX^fzLcN z^^Xp4*^Rp!8N~R#WkG4$1@_N48Wlzx>Qen=ZfU%E&x~=UwLJ&qe;ELXAbHdN->lle z-;8MQEZ2SDBsD-l@;4WP1zt+ZU^-OLDd&KT{bg7UsW+t#q~5vBA<^U$qUMSiN>V8kU-ZB_i=ypBOUGxd zuiHTUyiSbufGPt(M51b6A2MQWiUQI(HlFle4THVEzo!LBFO~Zf{{V8xgGhvPdj7oo$C8VMS#-WN zzI+?RsANc@pFKdoCyN~-=!>(~#-w|&*En=I@CPqG9n5daWD-3C@A2;i$b_MNE}t0? 
zY+?`#q*X`>F?7ebMLlisX})s1T(uEK?Y(Svlyypm{3(y>peusUYasfn%;bbAW~c zbSgJ%eEG}V5D>P)?AyVbxuO8p*npZSU6|#{Du{C935)GCVjTwWfrnB}yIg+JvS3Lj zpR9s4DQW#C0`%yJ^@*|^u^w<02G7nFgE5CiH3M%N=d9n%igBWVCoMfE&NwZIGdCTw zM#>gLywnH^ceeikgAlL?krC28W6P5RgR&EgcYnO#c*$qH2e{gD>jQ+U6FrL1)J}sF zM(`CaSirjd^PDh8WTuI;-~RwJb%X;J^yoG3>l$G|rTC6q$Zp&eJUFBdV7v8+G)26h zFH>jlsfU26V$?SQIn##}1cDsD)VLQ6i69Kn4#%Qki%hi2QkE*K;$m?C!AD~2iSM6` zF#rONkvhPwQiVk4&LAUz07z$rpnNs@De&5k+Kjz`@r5U78ZAScc3zc;)7o**ILdpNfdn=eEyF{F#-cY4Faql zz3Yr&Xfttj7gyh(ZgMmoj<=(KS`1NX5F^847V}BdhZ>EFt`P~OdDj?Vdx9h(RG(ab zymf2Tg`KrIdj9}8_JLKVk50qpe~fe?(M{0}KCe6L7O?d6$xWT?49&Av4DCe0gy&DJ=x0U<4!2hnyWN z8Uk>@#nU$&XbDybK{;;Re~xV}9?!$c{d1LZ1^g9A@!PFFoT@W7lVXM8@$UgGM#?>X z^g1R14!fb@X>>GC-c$`C3QthFAAWLO76g$^MkQD)$Mk?j0RTfc;OpbZ7_g;kgc|AN z>3LrmOJ6b2@5}FBjNif$4x-mvPVt6DNFI$)OhHL1Y z9Mt2)^>GhyPQxX3zuE64%_6#1@e`lw;gDQFbKtLt>mH;rS__~T-tZG+R0n-|;Nt{3 zL9;~d`pw7(ATg?*K1s6Mupf=?bTtUb+Yj` zOQTgyT06V-T-hCk0a841_{dR1LX&e(d*_^%^fd0B=!e!&2NbQ%x4Q82f$wILyG97D z4Ls9Wfp7|RytIe-oIU7@kO-@W4W0U!G3d~>w&g>azgcuGdZqW?j<7+cYAqLJbh57R ztYl6p=?oejJaO_tvIYl&mX#r=b?wKH$Wju9jTHV1c3?Hv^iM;c3-TL?3w$xAB94#{ zn>ILyU?@-nmUXMYJHVXoVNu*b8h@U3gH-@eD0jYF=HpZ_K+(B4=jRkwN~5xG&%z_8 zSOl?y0zv`=?sbC+gfvv>O7im8URt5F8jY;W6cHCaZkj9k^;`B#NXeKo9Axyo4FZf`<0_ zhi_T$Rr~%7SXV)nr-d*n^H?+T7Yr3Xa0La?R04_LD>N8_qbGpV-}}HpXp5C(y6cU@ z=NT@LgGTu0Q?%8@+8CiKaTcV@xK2eS+8z8F&3IH24sN5HJw>D4&bE1;AL|`SSX4Je ze2EkMyGd&yh|Xk zAHBalWd`xUb{VZ}w0##68j)>7puKuZN=)1P+Rg z>NWj)^MJu$0Ei8=x6$v`ED?293%cZJD7unN@& z&d0lZ*r5tqC=3sLZ7&w!Ohg^fKrV=+Iy85Vtz`wHrJ}K?QN3Uwv>SdU1#eF8E+rrV zb_n+~<@wx!P(Ptx2iQiR4lqt;;0K98q`oo`|4>^9N6G5#6-r0B_ns_lAiYEasc}KS#zcA_5!WO7p3+ zi*Oy=VDeVaed)o5N+ip*Mxkr9e>n!8+@e=d3Y+gYagYoXY!)OQ8@s$+6uyG+K$kIOI@i9D-hW*FGk$ zQ=rPg5<{=AoZOZwo=d9h$cM*x(@>(>s+;kn>j0t+6{nAXnUNut8OAkAJ9*wE=^+Be zK-AfKYmMQGipNOedtEKQ@Bl|HC%A-=8^0Xm@@=31(|OP%!}h@3Vst1QQ(@m)Tt0%L zw_HP#OYtxU5CvB#P7_bQaY^|UkwM9(Px-v*(c-Nkplgl?8-ht-G7pVg3HHm%u@j^A z4D&xZa9a={Ksj)J{){3B0jb6jVD@$KggFO=dH0Noq-Z8LnHV6 z8E^qo>bq&zFCAovuWgxqt@_@u*@3q$=JX1>wloUfgEd+jmoaadBL=vp3VTfUpR0IB!MAX73_sM z#FL5$0A6P7hM#=l(+C0Czs0yhASjW(8Z(u{q{T(eL+Jz*@sTKlgh54nYU?(tq(__U zyZ{|hrn>Ovr=W5Q?EPl3O9b{uxZmCcHV8(Oy~auZ0L%acQU&3=Y<%y`_k!Gk(dDgH z{9vh;*-j;!51bOmZD;^Ce6`o+C03omUxSbPj1{N|HoM%1hU8v|Nn3YrkAA-+T?wP2 z=+t++&Q=!av`ra5HdN;j(*QTFg@p?0Xg%N{& zEpfBJ8D9x!ZoU3&gV3CsIOQt+m<_?$Sa$I}{y!MlQ_htAy_4q>5Iay+(FWC7wX4=u zWi<&wXfy;kr>Tmg5^4iQc{KB_cO2OHfUHMk(C4G=#b`O`;HrH_x#KIOqYyU3$;jw# zWra{7!#uhgr*AJf?aM+9OGpV~qO-hqT$Le#yl#XY?*`j?1t&t-4qt_bk0IDWRlnP83 zT46(0>{A z`+uBd0Et&RpeGL(ta5wo#HCH!JH%9JFQNIH*VBNw#Oeo#ZeFi`@@km44*2I`{k`D2 zcq2w0H95Oq9x=xRmJZ zyPo!?1k_aF1lVhcJa#$6q+{haeRYfGO@-*~2bRMopm91b>&FvME-a9=D^zu}@s)!r ziu|jyvwFgc)M8uWGn^4j_N$Y*)}3g;dqN9EI437Og6VMn7XZ`@b-4WC;-Lf(@M@ov z8VKklS+Wr|$9!Z>xhDx-Bi^;fjx*G@O<2JV^RhwR&8TcS7MdD}3acG>r%L5;ZUu|n z51vEPv z!rqQd)Iex-YO)h<=D6bRe-5*a6OqT#;es73Xq!{cDj3Zq6L((lUVPyXcme|&=e&@q z=01LMJB#K$ddpOL)+A8PMKymr;Oh|UX9&3os-*P5x`m?XH1IrI`D61-0_1ehI-i3y z?5cR)4j7j~6q{%N0C2hm+U;M1wqYVc9f>#VCq-GJxad1%wQSJp=$)smK$r!AD!Pvc zi!+O4BMQYdb-#}CS-dn*pbhcg80Rn|6Q0Plo-s&cyc5QVQBl`HhQC;(P(oseJaAh2G0sT4HHNw_tyg(iqQFtx!%~ko;~5c2KSew9 zCuS7%6OxE*JrP{fRSwMSZG||m5xqO}&N@5f7Nq3cPW|FxejwaWC;sm?kRmiEp09fU z0EL1;)ks1xpXVBpn*wp)G=}HrHkhac!8RJ^DfRQ4f6O68n)C72T(!PQ?mA=HQFI;Ye;8H4GbJ6sVJ?}Y~JvAo-vA&PaLLDH95vfy}0pjvt6^sTqeCyr3Q+?xw zBtfFZ-qYugtkGUtK%}O*=lbo)=E@s@1@d#{)=+`K48qr2HeK==xg}DswyVjwwFV$S z4GvJFfY-uG*5z_ZG%78+@VG$(XlWf8I1rBct~8j?Rphv_4Yd)*aV#NO!ATG~vzdT! 
z9-!Qa?5ivWJsB*S4=7AFl!z{tG)SbnjE4|9t}h@X{hn~vK@c?2HZeAZA6St{s%gN9 z@Z1Iygn;>8#Pk9rhC0iBGSy{Qr0euOIj^WQ1z_?7j`(Qh29^tVAM^X;yhBl){N;%Y z7s21^<6$sZpwR6!YeNn4=mpqKE3zM#*Q`O9nsrXb$8Yu50E(EUSIMrc39jYIrBEn2 zTtVlTpL0&AZB<3F0O22uV~0=>MJ=ZHSlPT&eP949d7(I(JL4{>h`?%H3&A{6_1ZxI zP7#Vv7hZL};n2z#&(>E1q2+qq*9YPAkeUT7E8GIhrhDTzKzWYO1&?eGPK$dBG<;w|3kW@*;JbtnmBU3G{1GfjvMyU~aQ1q2-PdT}Hh*_97>xbf!#+)Q0HuuZ2haq5G+6y)TeHS>yQBSZ-u7R{b9 zL|{Syk7;*}J&DGuG=O!!ap-XZoOp!Q4xd=0ZxySTif0Q58`Exu{bh!ddrHg91>kXa zo;*Ot+E}5+_xs1I!Ktqt6NknZ1#|(AlIshsK|zhQx+2gVcX9w7p&$@DY8w>R^Kywf zZnQoANci3Ys1vr)+MuTa^QaLZ462+_OMENOoJ#~bamhYtG(IBSBdDsm6bH9JwS;#^NIqnH-PEB=SRDMFn~Ku)Z88)7yuwk z%cQ|-6!z?JIGrB)JYpxqWal=U-0ufsz>SHyH=w=G8^y+z42je?E#z z4JM|@u-3CfjnKrZO7v?IYat+yA}Y}U4nx%DV|UD72c!IZ!vi}lbnFtk*TvQ%kpK<$ z>fn(_MfiLx*u*7Fdt21}*-HhL3jts9b~zwd4XiH1k+a{djf+n*k%RI6I>i94&W|aG z$RaTWBs)K>BeSz;bcZKA@7{I1D^3QqJ$K^x$d*J(=K7!I#wtxxR`(q9I?kFDOdS@* zq<;SLPcj)3dr^KjXVx_dwJOofWbcjXJYpgQS>$N)!tFTx!qFgK{sW(Vc4MoWUu1PT z=H7Sj1OV|FxJJ!By!*z$SESi??(_X)*PEzKnzVUm{j72YMyRYv@E2OSOcc6Np{cgg z4;nskgFvJU1f=DNdn|>)4I-3Pk&O_h9FE->0jSf|bdaGFz4ke@(UcSW=JDRGYwlxP)cywfNkx(cb2fauG6V4!6QW{a5--+{zPa#c--uHj3kO6S? zdXYuX_`qcy_Njr1h`-z0M$M#z8k5A@jx^j zG*#9pb4gd`KHdJYqDH8bxgRc{8A!Sa!hs80^Mu2pvV2&+9OQ+gL>hPX&*KjRYlKES zIn%8;bqFC34S)b$)%n7~DGjN>@42*j`IbY8uHC9t`8U>El;#FI7RSBo#K=UFouHRZ z6sCh&91~kudH^?^>2d-Vb^icfF;oI5Zij!b*Em>o=tziAY^V|*@Fu82Nob>?>Ndky z8qhJo5LF&@E&+oI*8@e7AE$VXihPX62!Lm@PLY{@u!gk3dx`dHNtT(THjj02!XbQ_Os`|48W?2BTMD!I6eEzoeb%> zfGZD!526tT3ZjG?ueJA(zQ}J=ZLc!@fUIboGBvYSL?c$~&74SbkJIIoxQC2~-=2f{l$A@M#EyOgC6>q+BSnR9G z8e%Nb%YYq+$SZlwEo}T}5Z=avjk}Y_9AzPc)l+W27=ly*L25Ae1#8_A_oD{$#`yijlqo`F3=9n875~ z>H0xE={yrQE)`b}g5Qq1#$k%=0J>Co@!S)P6WSz1mINm(*Svt14#C;VmfG|Ct})SY zkO35WJ&gJncoY%PFoxyrhNdHGLw0~Po>7;EWXCisN1?iCjy$|%PXa&?yD8>={+km< z3D5)%m#3NDP8ex@Mlf{DX_75(ES zL34SazZwg?f@A;*vV4^epE%(f%JS~7a9(~~nAO$O+pcu`?>Hi5pi0THKu;5{abo~L zMlQF~Q|x%k(BcSQLs3*8ZZ0?tq&|h@j~mKiTN8F1H6i8kywKr<5D~#!T?Fm};u3Ho zxGO|MgVtYOD@R746&jUJIlz)LQ58y2VsyN3G?a7*a-isnuRu^mgjIvjll6}g1ra#J zGKE3>r^WqXlR)Sxs>e;b=9lLRMODmtba(#%bd#znSSI~wpZv=J5l}hs`}}6H6jUU< zdY>QqePjd|ov9mm@#xDCg;{Q?%o_o&wR(;qikV1fLJP;O?3(9BeysFvy`u{wI*cj^ zz1g}Al+phHxP^}(Qeolbo1z3&91VC3!Qi-@3F4}e4Ef`C;(2B*Gq z9Zdiu;GgdVPeV|tY6t!Egg!vmI1%u&@wc9GX!cZ4Yr>CP{bt{^Qb9$~?r+bG1jSYa zofKZ(UExg8fyow}bP08{0my0tblTBHx}LX&qTL$>WC8@=uX8uxflX#8uJ><;Hb>3~ zt*r=Y=bkP!GXz(H8cNMOcYq^M565`}(B8IO7-9@;e4&3B(?e`Win~sCidsB_&^t!Y znY`6{2f@4|)0Nhd##S>6gMuO9jsT68`|)NsY6XSZ|U7A;sT!oQZ{EX>>rS`1_dj2v7hOG+2YG`OOxD z+GzcRcSeuNO5LN#8=4Ut=Y4M)r2L0wxzhapXECJF}Nq~`<&RSb}0xAMnc3d4n*R7dB_^HrnrS>_)`F7 z)bt%FX8u2{VN?{+g6=G?olZCMi9rIv1)`e$J#Sb-3)se_?Vn6Dz@-Dtg+0AnuYpUc zyrm>IC-=@)Z6yW6!4cmK=Xea1do2W@bVe_TJ{*v6(6>g?!1RnWd{8Cd;8`cGCs|Uh zo!V+DB`;FL)>HzBaOMljo4uzKdLg(d?RNwOIoV-)bTO=JY_2piMbmwiE8Ch{ZY!hd z+gU`&_DdEj*>*v@cp-><4%@{VvTKqOTFS0HD7i#Hl&RnY)&Wu%YE361>)dx--vU&D zqoTaiUQKs`En$JWS|CRI-QF-jBP5B74qu;mg;vMW1qN?NBju8C^ed^og-Aix5H(sL zUdpoXo{vTrf&<4Tc(tBQxFaqEZZ;>D->l_eg9e8)Xb1hV)Cd^TDEJxr1d<00+hQmU$2YI6z~t32T6gAs6no}V+b z5(_%Ee;()g#?%y0k$zt@Z;o@i0F)Xo>hg7m--s>fA!^oNZd_f40lPOh`OXMI&vja_&Pyh?U&TjtjLCr^Kn4b?dJ#EfPrqUET=tcG{piQ6l2u@UuQhvCYAzN`8}AbMbW>OqF?T3Y+O1# z&;mQCs{oc@K>;nGWEx@R$&^-vNY-b!3F z+jYb@so#8IEp_U506YBp<2VFTmWNkse-rb7DXryktK$Cv-QpOMEeRZg2`g&y`5yT( zG!jQdYxVV&lqQ5p@cQxVJV4P8d%k|_`p&a}NOmt<`|*~fn@zhG$%{LPg#z~NXWj&D zV%nU6zbhofT7m5=K7R6J1rE=OI*rySXIM(7J8h}Q>E2n5BChhlt>;a8Zku;zAh*dkr zdw{Y8Unq5>J4e?>jdF0bYE952F$>ie^0FZ*%uGxEWW{fpd z+Ew2t)6w2HY85ss%@!Nu#u9-b*l2^Jzg=^Z(NSq=Sab({zP?EbwjQ0yHiqvV+({tO z@F2H(efb|4xKolCX+cpFMvp!*$rVA%t#U29zCG`FASfWsr)}^~_?U-H1E{B02WH!Y 
zXb!^FQ^DXQ`oRS6dmR#R6dKgyoP*qT^;~tD{lxLWm~GAb@8tgT@U)dQI)f=noE)cUWXIswF2=x8M1ApWrH< zD^2ma5;~csGzb<2;^3`MrqV4)$|aWQHdOTFxs|L(c2hv(`fWu^TAZaX0S0lbFrq}! zVh)EqhxL>Noh}-_*M|<^y#|5ycau9XD!M)S{{YNY7_OuXV(*Xfl|b`JB5e6;_F}h$ z4sL?)VK?)vG0Kt@fv~n$tJ#d@eWL>Uxecf; zg4SMo{{VRbFcAVTPUL#=g1}M+6pvhf@CiVqz(C`3{`ZcuGem9ud*cRbO}$e{)}q;~ zT-s`pU^@d83i4CNEyzH4kdnCsPrK1rplpg(a5^{xJN}93TMYo?mmAc=%8+$Q`{wtL zR)vw65LKl|L*mQ_=(wZ$ z&U5o6T1P&33-^pm!nGw8RC^5EBbtFnFM>Kn6KnvRKxDsm(~iSY(F24?gA(B$&9aanvFvfL3O#^%)P!r?^-cO@B%SuLs%NE=i%;Jg4SK?fN{ zZB2~kjkAI#i0_y z-<)bdpi-iSS7y+7=L0LMH|05n@#m8VAi9JK2t5*~V&DdHizL$0)Sl5T{>N^3+1wcq0pbSl$?HQC1Mm2mCm z>$3r72@pj#JGO!w#IPkr z!~vkZqQN8pp_TRk8r#_^u_3g&X258AKb3cFB^x_?i>&q-WP5P>#85;@w<3#5Af zmrel?M1)UtFxvO4F5(GOUAzJ-jMS@nL^KEHZk)iy;B*A6**L zNKtk|qqI@e2}Ac@@ERfR3vRk6pBVh5Y^<7J#OsVGQiuVovhncw$rLoS&mq>!n6SJ7N{A8A@D|GV9>yL~pLNGKR=$&B&)GaZpHG zVDWTP;Etl1Cv_4FeAI_49#qZLgpyd(nPBkRBY|%a zS%5=m2_4>Ym*7@kYof&Dc?oc7Z3zQoS7y8cb#!95=;LA_d^?dg&p42EjkN?2vryWR z=Qe220w0h7i>h;jeRUSuCbp~tTOXFDbi{B$Z8VEf_aeak7v9+8uE%D!(h^%j3T8tA^Pa z^KAkhU7PyPGO1}dPIa@48o3w{3d%X}lOO^r1Q%E0>y7K{2!;)AIq2W@{zOoCIU-3l z72WD&>3Iqr?=pknp-fXDE?JcQkeDr0IbOORfw26e8e@;&wO z)~6X8ksR6_*WzCnoVV##_dx@D*4eC(iFg~tuYF}w+5{cDlXKHNWJE*=Af}F2lU?K> zWi)C8XP82pj}ASWRphjsO4)L zjFjvxK%oBsJa1*-CCw;XGzXBcXe5*=VR$;xwjaD*t|-vxlpTU{%@j?nQ$(@Uh)tZd zLvtD?iccg2g!&QTCxXR=)7!uRx$Mt|$2$g>LlLdh((WB;nuj?8R@viX1QPd>t zf6nrtJCkxB<7RAd()_h@6VGSH402Xt1CVP;pgulu`6dM7@zxz7ffWQ&oRQ2+*}Rzk*Oyz(KLdDkPhWnjx42 z*qn*4BfKI`BFcpEzhH0g2BX!Y;pKrBZ@p_YYjov8hYB%?=O$fLxSKRcIUNJZa2s`Q zosf#9N}i!2E@}ZF&>biVRM#!jopaI4EA47o(`E$EtcV1lfkY8=yfg%XP?4%SAR_X@ zFDX-og4jW)HqpWF;zRaF?W(mluPC=i&g@bbJxxj3I219o0y3?R7*Z5Kus&WfePC-Sbl!DNTb@Nu|(m_zm=MD0dVh7N0bn z`It)N3xt$Zu{wBu49N*H9pM&)dR%^#dK}h->&-%!RZa{vih3e?DPOg`48`pkDTI% zxYbT+@z-5wz}x~G;GMkeI=o;bb%NL)IQacOF#u3i5aWJ-<;V*@So0u~%YY`}8$4eD z#})OP939Vy8~(6r!~|#*Eg8$Vtfqo#MY-rad|U*;Vn|1sh%n9p^QQ8@a+i(((joc<9 z(n*>sY23dg!E^_}p}iY8)E!&cRRDqxdI-BV>-tm&P?{n<9m{{bfAamx!dgLm(JiYlt8N_ZaW0)hLg|hD;>Yca-Du4w$@!PYVgB9!r zsQGTaOaxL2QZW%thXvX1ymbiG&;(s3w3ncE*B% zYZk_u$wh%7VkHh9k&r!*$#fofdE+6{#*8L{09+02opWQ8b}ZY*mb4O86QKeK(Xbm- zZg-#ro`uocmqgt)+5|@v8 zpnwq%BsY|Za;BUCWEmd*e(@4PzA10y#<+_~SFRsg@MYSn#>#&10h-Q2SFXhMUpPw1 z-CYCN6zEyrIN=s&N~^Rf-i{84Hxal00Kebf8(2Ut7jHB3^M)VucZ$Cljg@zzd~BWL zCX~>KdO+X4uwP1n68E8ebASXA2!2PBem?m$h1dk5lfvxq`78BG&j3ao`X=qd?zH%U zu`c(nN4bb9CZVWnZ#sdTvy9&8BnOz3a0gwti!lIhDKyfHO;t;SP$iLGT|6%AnHd$1 z&IG-1esFk(bfgtpN6yZ$l`Kz&7gD+`dgBN;)j-7-1*POx7gDDBLXrVlctHtX{2xh_bR?ds3rlYPC00yqhpCp3*?757gc;2sLo*nu7>UP z*KZn736`#Q?_Bu+G&Xg($RL|4=Qa>z1G*wdf`UbH0uo&)A&nMx)5CEgVbBm4Kz5f< za>i0hO)6c*lBh7oLyZ%Nj7thl;IHHte%2)lqo_8sJ`%23yf!AmXD$KkB;#G@yh@c{Z9iBoKo3j)vvr8#JUFsJ@)Cc}64P*=OtGg* zZxEE6BZ`7*alA(@W18d$WIKh}UF(lqq5)J+lB>ZrejGV~fwU6$!M*qRa!X?636`o} z^*xw5L=>opK&OYC4jYvspB7FvmJliuK=--p{{T0l5Yz-<*WeTL*kMK06gJQY%gomK z!OJLxK=>Y`lmwnM(bzWF@%hbMf|9v+q&=8|@Bj|YfC;5Wd)MR6Y7FSua|U{=w?%G2%U(h0geh~!SNLR#&mNkHh|92f@W{Q%4Zum%u&H8Gchjx1vw7ek2M@sTr?3Oy1#n3Ks}6K zV1?!a_~SLmKpiI{4co)-22-y%t7*$&VkV~!X$@l_jp$!i^G}?9e1HSX0d26>&l(D# zgE<@zG0;-#7KmjrTeMV4j~gda^cH!IZznYd;o+Y0#_sIABx6Xg4C3;IK}FbDow2IX z(qJWU07F8Eu68au-fEyhDiB&lZl3RqwH_TYQk{_U^X1?HC1py3Wi3}iX*$JRHn!q# zh3ai#A;FZOz)&NIS+H-9B}{i9RH`JFfYpFOd&f`$37#KbgM@p*0|M zB77dJPf3LYM@xb-1Yg_p-?s*c!fJ7=_IHp7j)HQhzIT+k2z3cq?FaGB2LJ*DQmQKL z51nCp*C-Q%Z29@iV+b&VfK5+X!RIKs0jNJ+nnmrtE;4yOq)~fBj zhUTn$gHnuwD9}zhC%iU9Mz^N10d-n)iDk2G>MF6cxw(>3rni1#(J(Bsi zE3vXbYyxRDM$B|iV?~&_>)>E0bC85HLV!VfM)<|BpV1=#FQj_QckRPkXS_= zBw>UFHK!nRWJN9q2mrMcp~|j@4lpL9v!rO{)p(xqV@(kX=#i9o3-^G8t|rIQ6{R(7XirGl(wRObZ5?u-)4!`ocW*jIo+O{S)x zsm6~Pq95<2#B;<$n3 
z*XsisJ$PW?ikELV)~FpATCi zufBEF3yclCDSjh4orn0%_i$_su_mRovv~<{N<>M@-h94n#SGO0Vbh&&d%)9GIg!1A zI^W|R5R!}spU)fdh*T2x56*}ZO>vtnC{{tGwEqCJ>nE<*H>eBwX?Wu|&=X*=bz(m1 z_lBerlzW5?&OK`d6m}+Wb|lkPe>mz*>?DVPP61zov-d!9&qeFwyoWTP_1dW?J1xrP zqOIzSwT1eN0*oh}kJAc??MKn&wuz0_q!X0?&Ny$i+~g6e_MF6R=|4mkfieS+GR8VJYqHgAvou=ktX2*3&T}}Vwyp% zV)zM8$cRvvMv&jC9)VGfNPR=-=zbcbNY2Ac;Hv0RuLG@(*~O)a!)Z@laPHp)mS zunO-<#wRigv^TMf*pL4EDS)?1-ugZ?K6cKnHHpTeFA^<)Yg6RAP@hB%_xcWPH&-H*L zphrySZ#-yqzVH&@bQN$AHP-U5G?~(3O;c~2qTwF7u}h@a!s8*X710Xc8bMEwb`F3N ztTz2?67H%l^N7_QHft<)<0Nc% zvWUIvr&HD(WJ$0P>CrElfe%0eN3E7rbqrr0EF0|^xDPt>fOl0uDyGzqhnsP+ctE0F zhXpmQI>f=l&@TtDGl$+0h9ZU@0HBt>Fxs>zRRvlJp0jcUlSCA_?ih4s+BPLh?6fZQ z>j9_&fDvdxb)Y(<sx!qFa$J|Cf!MY8L$yRZ%*ni-T9ah?G>zn9gxh#i~+pjw*t^ z4-Nvgs{ykfGf_nE;}Di}$eCS=hK0m}Y|>z91rGfG0QVod8Yl@+0Bg%$a2X^-3Fo$x z;k{g?-B>%*6UWbOcY!BB3P5G>3$HsdSVdH29#cn|I@|9il9HCE*kO+-00l?N-yhc; z<7eULzpC2`a6CKU@Mum*mExkI5CQw8U&$If&;BKtQ zur$%%XQJ+6MNpGSx+)H1UAefhwGjib0>XDv-f+op7!=|3EE>5DS&rtHWNEH7)6ESVL~* z@?>D0X$~hVLxB7E&B9RF;0}!?`TnlTPQ$)A0rbzA-Y$?dv@mq~?+rpBMDOwOi$OL= z;{cR|VCCWUkMtlZAmE=^fQk{bT<_N(tZho}4Cf})$5dz)i6TKW6ZRK{q0{eub8(KLuC?T^z6R8RpHS^ofhk%_> zNTns#r_S-j+KtdK#NScFUq$6XbZ*P*HR}Z{8$+#LdBL8dmO*0n>s~5gcD81@s%?3* z{ZN7++*YXa4?W;;QFx*`9wL49l*b{@n+5Xg^K#7_{D@c1zgaM-C_KdV-rlev-b&Ko z$9-of55)B6<2rJKbEannc7kpH0K~=GWD4|g`pXI8@Bu|U>)>!FBS?X~9`~nmm79Iu+yXGh9YPW z1_~m?LUVx~z(n(eD|tha%@>nQy!7A+s~w-`1jJ~`i@_1?Chrdu1B12J&qRZ7Cnv@O zEONrhqT)pwrpn+Ts47njP*VH+b1I@;Mz!!nj=RfL8WbFzZ4M;u)OUe+s)m5mcs%P| zWzDN+eNxR|x7W@-ASDMyK-g2Rc2J2R&?0c#bUvqejKt={P#~iIo%1IJ)4` z?c~S6DhwJW{w(o979wlx1vL7TsuxzB}6=+iA6omdCAokO+PPh)?aS$+Nrh)<9Ds#lAvwV zxq0(&)+yjM(-qBJZgmH_HN{?r<6f^EWFpbh6)0P4PKd^AO}9c6 zNVMb|E4)gRhZ`9{(eXcg;tfb?Km(O`$W#wACJLq)!RtgrMFp}H=^<ZLAly=LY@o=olLL%=E%BMh|Bf@gC=x81jjJvXn5>$kVr$$vs z#BoS4-~zU_8hXi4w4=c04 zHh)>HWa&ksBg&-i%j*e^TN7rpy`Sf;OqvqMPEds!)jz?`Nl`|mQPY!5oMDB9^ku2f zdc0(yEz6To{3gD0x12URJU1N(JSv}k;Q@MD-#`0{(P+GAKj-~p(l&HD%B!HoJ0GVD z@qqw|qhY_?{N%&;K%jwkhTn|at8taq{sHg*0HaiqpmYQ1K6%a7qEmo5akS(-sDgQV z&Cj39MB4lW~Y-3tfCO<9$>Q*0-7lw2lI;bA-X7A zudZ<$6%y@;Y#!JEQhE8eQEONU?yvF&L{#PopXTtXmIr?-lHlC@=W`0Nh}jC~7|j0r*=0 zep~aK${_SZ@x;JjXvC*h9VLEn(*eL~fm6bGFb<2c$4@#C-aqz*(FNZxIWQ`6fsCz^ zQ~q)a3^IaoJjC$~1XU3=3h(D`vRzRd;M4oV)&)ZKT&PhED(g?AkIqb8(~T&z=ZWRQ z5CK=KTullHwkvUK5JIWr{_%=qL@WOQedVIk348M57KKk^wvNvufMA|ul5GNkt-cs} z1fW2W7iANYCZzS0X$G@sCe3X@@tZVtBf7E<&z*116H!18u~(0?mQg_cemlkCT=Z)z z1PUJR6iAoY;>Qy6>A+P-vT4HT18GYi53Eut8M1O>o1!7i=iYM&Yyb|f^?`#V5YjXv zFXNNJhpl#?Qj8(nz4z8u3A}s~#1 zaC9hCG^T^A52ND;Iobo0nyl|r#xr*aDsi)uP=D>AN@zzePdcbgc*ODml@J72jhM=5 zr3Ze(K(O22E-|R_>ofu?I}y=)Xa4{*hy;2k0C-KVwBH#d;3bIgino+ zId^|K5zScmE|dmwrs+Ar0iltk6S2dy*UKtMu+FDW~f!6qP4te$JiSZQ8Z$Bgl>0? zB}YV?3BDq$v$8y7X@>+2T40L-CBHHN!TCRKs7YndHIdFo@AaxP-3 z9F1`K^OT^%f!HSPdVM#-v0y`sDwYhXr#H6{8!R)xR_Qf4e({131mlx$PWFM(oa4%< z1QE)I0mOQ7x^sg?+ECY zJSSu6Fm~z|Qb?~KSc$z2F5X|pD|&U_Miv#XtWupW9)k|;5QXSiZtP&wb5k9Z=i4d@8_i=<_K{x*ZcZeEdY>jhQ)&Bsr zL6k<;i(l3ia4|>jILQ*b-VR;-WJ=p0c@vPtxE({YC_x|yzOb#AO-HYIgbkNV?*pyV zOF16!Kuu9G5SlcerZklqMsw>ZOK>9i{{Wn-3#tcy^}tXCsha`j;rC1u5+6VJ7!r^G z27y5L?;QvLkq}GssgPPxp+`;kj-J*Liu1gr&<)=E{aiwkq@5F&9P?<#I95bq%U(P> z#*MIyppp7N=L1*@J`TUd`@*7{4$74NvZJUO#f%D|}!U9qTt&jDP zZ?rKAaQp9D;{_DJ0MKE9+ad3qr%gZ^Hx`@fDSVqd)S#%7lD`JE%*4E(yk3|0tdl@{ zFj3h3-f>n8I7e3?FF>MqgBP{~uXu`9%{P-K7K&5k#v+FI=HMb&>SnktXpH0CVXuBM z8p;qVmr$?yI_Cg;H$eCsoL+8Ff+MU5aO2Uw#;SnPLX7L6Y5aFRAGBs(jOm)0spO_sIRxAXD&$TS$VE7{@g{{R^V zE;33jVg~^=jn;xhp>}qslZ}|I(^ru4wBGa^;{)v>I=-l(vb-EF90=*1%9FZ_cJp

[... base85-encoded GIT binary patch data ...]
zZPm?kL(pOa!)08zM0L(Cx3o09W}!{>Tw3S=+;C!=uA4yP>*&f#h2o9@-~b>04MEc+ z=kb&k42eE6!Vz8W^TsPTD5 Date: Thu, 9 Jan 2025 11:21:41 +0100 Subject: [PATCH 79/81] model: Add support for PhiMoE arch (#11003) * model: support phimoe * python linter * doc: minor Co-authored-by: ThiloteE <73715071+ThiloteE@users.noreply.github.com> * doc: minor Co-authored-by: ThiloteE <73715071+ThiloteE@users.noreply.github.com> * doc: add phimoe as supported model ggml-ci --------- Co-authored-by: ThiloteE <73715071+ThiloteE@users.noreply.github.com> --- README.md | 1 + convert_hf_to_gguf.py | 57 +++++++++++++++++++++ docs/development/HOWTO-add-model.md | 10 ++-- gguf-py/gguf/constants.py | 20 ++++++++ gguf-py/gguf/tensor_mapping.py | 37 +++++++------- src/llama-arch.cpp | 22 ++++++++ src/llama-arch.h | 1 + src/llama-model.cpp | 11 ++++ src/llama-model.h | 1 + src/llama.cpp | 79 +++++++++++++++++++++++++---- 10 files changed, 208 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index 0126da89c..a71015256 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) - [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557) - [x] [Phi models](https://huggingface.co/models?search=microsoft/phi) +- [x] [PhiMoE](https://github.com/ggerganov/llama.cpp/pull/11003) - [x] [GPT-2](https://huggingface.co/gpt2) - [x] [Orion 14B](https://github.com/ggerganov/llama.cpp/pull/5118) - [x] [InternLM2](https://huggingface.co/models?search=internlm2) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 01b58f976..5562499aa 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2562,6 +2562,63 @@ class Phi3MiniModel(Model): yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32)) +@Model.register("PhiMoEForCausalLM") +class PhiMoeModel(Phi3MiniModel): + model_arch = gguf.MODEL_ARCH.PHIMOE + + _experts: list[dict[str, Tensor]] | None = None + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"]) + self.gguf_writer.add_expert_count(self.hparams["num_local_experts"]) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # process the experts separately + if name.find("block_sparse_moe.experts") != -1: + n_experts = self.hparams["num_local_experts"] + assert bid is not None + + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] + + self._experts[bid][name] = data_torch + + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] + + # merge the experts into a single 3d tensor + for w_name in ["w1", "w2", "w3"]: + datas: list[Tensor] = [] + + for xid in range(n_experts): + ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] + + data_torch = torch.stack(datas, dim=0) + + merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight" + + new_name = self.map_tensor_name(merged_name) + + tensors.append((new_name, data_torch)) + return tensors + else: + return [] + + return [(self.map_tensor_name(name), data_torch)] + + def prepare_tensors(self): + super().prepare_tensors() + + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d 
in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") + + @Model.register("PlamoForCausalLM") class PlamoModel(Model): model_arch = gguf.MODEL_ARCH.PLAMO diff --git a/docs/development/HOWTO-add-model.md b/docs/development/HOWTO-add-model.md index 04c5ccbbe..8fcd70811 100644 --- a/docs/development/HOWTO-add-model.md +++ b/docs/development/HOWTO-add-model.md @@ -28,7 +28,7 @@ The required steps to implement for an HF model are: ```python @Model.register("MyModelForCausalLM") class MyModel(Model): - model_arch = gguf.MODEL_ARCH.GROK + model_arch = gguf.MODEL_ARCH.MYMODEL ``` 2. Define the layout of the GGUF tensors in [constants.py](/gguf-py/gguf/constants.py) @@ -79,14 +79,14 @@ Depending on the model configuration, tokenizer, code and tensors layout, you wi - `Model#set_vocab` - `Model#write_tensors` -NOTE: Tensor names must end with `.weight` suffix, that is the convention and several tools like `quantize` expect this to proceed the weights. +NOTE: Tensor names must end with `.weight` or `.bias` suffixes, that is the convention and several tools like `quantize` expect this to proceed the weights. ### 2. Define the model architecture in `llama.cpp` The model params and tensors layout must be defined in `llama.cpp`: 1. Define a new `llm_arch` 2. Define the tensors layout in `LLM_TENSOR_NAMES` -3. Add any non standard metadata in `llm_load_hparams` +3. Add any non-standard metadata in `llm_load_hparams` 4. Create the tensors for inference in `llm_load_tensors` 5. If the model has a RoPE operation, add the rope type in `llama_rope_type` @@ -96,9 +96,9 @@ NOTE: The dimensions in `ggml` are typically in the reverse order of the `pytorc This is the funniest part, you have to provide the inference graph implementation of the new model architecture in `llama_build_graph`. -Have a look at existing implementation like `build_llama`, `build_dbrx` or `build_bert`. +Have a look at existing implementations like `build_llama`, `build_dbrx` or `build_bert`. -When implementing a new graph, please note that the underlying `ggml` backends might not support them all, support for missing backend operations can be added in another PR. +Some `ggml` backends do not support all operations. Backend implementations can be added in a separate PR. Note: to debug the inference graph: you can use [llama-eval-callback](/examples/eval-callback/). 
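As a side note on the converter change above: the expert merge that `PhiMoeModel.modify_tensors` performs can be pictured with a small standalone sketch. The snippet below only illustrates the idea — it collects the per-expert 2-D weights of one layer and stacks them into a single 3-D tensor under the merged name. The expert count and tensor dimensions used here are made-up placeholders, not values from the PhiMoE configuration.

```python
# Standalone sketch of the per-layer expert merge done in PhiMoeModel.modify_tensors.
# All shapes and counts below are illustrative placeholders.
import torch

n_experts = 4          # placeholder; the converter reads hparams["num_local_experts"]
n_ff, n_embd = 16, 8   # placeholder dimensions
bid = 0                # block (layer) index

# Per-expert 2-D weights as they appear in the HF checkpoint.
hf_tensors = {
    f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight": torch.randn(n_ff, n_embd)
    for xid in range(n_experts)
    for w_name in ("w1", "w2", "w3")
}

merged = {}
for w_name in ("w1", "w2", "w3"):
    # Gather the n_experts matrices and stack them along a new leading dimension,
    # mirroring torch.stack(datas, dim=0) in the converter.
    datas = [
        hf_tensors[f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight"]
        for xid in range(n_experts)
    ]
    merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"
    merged[merged_name] = torch.stack(datas, dim=0)   # shape: (n_experts, n_ff, n_embd)

for name, t in merged.items():
    print(name, tuple(t.shape))
```

The merged `block_sparse_moe.experts.w1/w2/w3` names are what the new entries in `tensor_mapping.py` further below pick up and map onto `FFN_GATE_EXP`, `FFN_DOWN_EXP` and `FFN_UP_EXP`.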
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 9d0e7489f..cf05bf47e 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -244,6 +244,7 @@ class MODEL_ARCH(IntEnum): QWEN2VL = auto() PHI2 = auto() PHI3 = auto() + PHIMOE = auto() PLAMO = auto() CODESHELL = auto() ORION = auto() @@ -428,6 +429,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.QWEN2VL: "qwen2vl", MODEL_ARCH.PHI2: "phi2", MODEL_ARCH.PHI3: "phi3", + MODEL_ARCH.PHIMOE: "phimoe", MODEL_ARCH.PLAMO: "plamo", MODEL_ARCH.CODESHELL: "codeshell", MODEL_ARCH.ORION: "orion", @@ -940,6 +942,24 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.PHIMOE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FACTORS_LONG, + MODEL_TENSOR.ROPE_FACTORS_SHORT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], MODEL_ARCH.CODESHELL: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.POS_EMBD, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index efe2a4aa4..7616c468a 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -55,7 +55,7 @@ class TensorNameMap: # Output MODEL_TENSOR.OUTPUT: ( "embed_out", # gptneox - "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 + "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 phimoe "output", # llama-pth bloom internlm2 "word_embeddings_for_head", # persimmon "lm_head.linear", # phi2 @@ -68,7 +68,7 @@ class TensorNameMap: MODEL_TENSOR.OUTPUT_NORM: ( "gpt_neox.final_layer_norm", # gptneox "transformer.ln_f", # gpt2 gpt-j falcon jais exaone - "model.norm", # llama-hf baichuan internlm2 olmoe olmo2 + "model.norm", # llama-hf baichuan internlm2 olmoe olmo2 phimoe "norm", # llama-pth "transformer.norm_f", # mpt dbrx "ln_f", # refact bloom qwen gpt2 @@ -108,7 +108,7 @@ class TensorNameMap: "transformer.h.{bid}.input_layernorm", # falcon7b "h.{bid}.input_layernorm", # bloom "transformer.h.{bid}.ln_mlp", # falcon40b - "model.layers.{bid}.input_layernorm", # llama-hf nemotron olmoe + "model.layers.{bid}.input_layernorm", # llama-hf nemotron olmoe phimoe "layers.{bid}.attention_norm", # llama-pth "language_model.encoder.layers.{bid}.input_layernorm", # persimmon "model.layers.{bid}.ln1", # yi @@ -152,7 +152,7 @@ class TensorNameMap: # Attention query MODEL_TENSOR.ATTN_Q: ( - "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2 phimoe "model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom "layers.{bid}.attention.wq", # llama-pth "encoder.layer.{bid}.attention.self.query", # bert @@ -165,7 +165,7 @@ class TensorNameMap: # Attention key MODEL_TENSOR.ATTN_K: ( - "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2 phimoe "model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom "layers.{bid}.attention.wk", # llama-pth "encoder.layer.{bid}.attention.self.key", # bert @@ -179,7 +179,7 @@ class TensorNameMap: # Attention value MODEL_TENSOR.ATTN_V: ( - "model.layers.{bid}.self_attn.v_proj", # llama-hf 
nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 phimoe "layers.{bid}.attention.wv", # llama-pth "encoder.layer.{bid}.attention.self.value", # bert "transformer.h.{bid}.attn.v_proj", # gpt-j @@ -197,7 +197,7 @@ class TensorNameMap: "transformer.blocks.{bid}.attn.out_proj", # mpt "transformer.h.{bid}.self_attention.dense", # falcon "h.{bid}.self_attention.dense", # bloom - "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 phimoe "model.layers.{bid}.self_attn.linear_attn", # deci "layers.{bid}.attention.wo", # llama-pth "encoder.layer.{bid}.attention.output.dense", # bert @@ -242,7 +242,7 @@ class TensorNameMap: "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone "h.{bid}.post_attention_layernorm", # bloom "transformer.blocks.{bid}.norm_2", # mpt - "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron olmoe + "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron olmoe phimoe "layers.{bid}.ffn_norm", # llama-pth "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon "model.layers.{bid}.ln2", # yi @@ -265,7 +265,7 @@ class TensorNameMap: MODEL_TENSOR.FFN_GATE_INP: ( "layers.{bid}.feed_forward.gate", # mixtral - "model.layers.{bid}.block_sparse_moe.gate", # mixtral + "model.layers.{bid}.block_sparse_moe.gate", # mixtral phimoe "model.layers.{bid}.mlp.gate", # qwen2moe olmoe "transformer.decoder_layer.{bid}.router", # Grok "transformer.blocks.{bid}.ffn.router.layer", # dbrx @@ -310,10 +310,11 @@ class TensorNameMap: ), MODEL_TENSOR.FFN_UP_EXP: ( - "layers.{bid}.feed_forward.experts.w3", # mixtral (merged) - "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged) - "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx - "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged) + "layers.{bid}.feed_forward.experts.w3", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx + "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged) + "model.layers.{bid}.block_sparse_moe.experts.w3", # phimoe (merged) ), MODEL_TENSOR.FFN_UP_SHEXP: ( @@ -342,10 +343,11 @@ class TensorNameMap: ), MODEL_TENSOR.FFN_GATE_EXP: ( - "layers.{bid}.feed_forward.experts.w1", # mixtral (merged) - "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged) - "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx - "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged) + "layers.{bid}.feed_forward.experts.w1", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx + "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged) + "model.layers.{bid}.block_sparse_moe.experts.w1", # phimoe (merged) ), MODEL_TENSOR.FFN_GATE_SHEXP: ( @@ -387,6 +389,7 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.output_linear", # granitemoe + "model.layers.{bid}.block_sparse_moe.experts.w2", # phimoe (merged) ), MODEL_TENSOR.FFN_DOWN_SHEXP: ( diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 007d79f82..eef66ed31 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -27,6 +27,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_QWEN2VL, "qwen2vl" }, { LLM_ARCH_PHI2, "phi2" }, { 
LLM_ARCH_PHI3, "phi3" }, + { LLM_ARCH_PHIMOE, "phimoe" }, { LLM_ARCH_PLAMO, "plamo" }, { LLM_ARCH_CODESHELL, "codeshell" }, { LLM_ARCH_ORION, "orion" }, @@ -584,6 +585,27 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_PHIMOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, { LLM_ARCH_PLAMO, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 45e458bb9..2e5f97b77 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -31,6 +31,7 @@ enum llm_arch { LLM_ARCH_QWEN2VL, LLM_ARCH_PHI2, LLM_ARCH_PHI3, + LLM_ARCH_PHIMOE, LLM_ARCH_PLAMO, LLM_ARCH_CODESHELL, LLM_ARCH_ORION, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 7deb3683b..7260cb155 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -76,6 +76,7 @@ const char * llm_type_name(llm_type type) { case MODEL_8x7B: return "8x7B"; case MODEL_8x22B: return "8x22B"; case MODEL_16x12B: return "16x12B"; + case MODEL_16x3_8B: return "16x3.8B"; case MODEL_10B_128x3_66B: return "10B+128x3.66B"; case MODEL_57B_A14B: return "57B.A14B"; case MODEL_27B: return "27B"; @@ -661,6 +662,15 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { throw std::runtime_error("invalid value for sliding_window"); } } break; + case LLM_ARCH_PHIMOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_16x3_8B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; case LLM_ARCH_PLAMO: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -2094,6 +2104,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_OLMOE: case LLM_ARCH_PHI2: case LLM_ARCH_PHI3: + case LLM_ARCH_PHIMOE: case LLM_ARCH_GEMMA: case LLM_ARCH_GEMMA2: case LLM_ARCH_STARCODER2: diff --git a/src/llama-model.h b/src/llama-model.h index ce038932d..424cb0f52 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -73,6 +73,7 @@ enum llm_type { MODEL_8x7B, MODEL_8x22B, MODEL_16x12B, + MODEL_16x3_8B, MODEL_10B_128x3_66B, MODEL_57B_A14B, MODEL_27B, diff --git a/src/llama.cpp b/src/llama.cpp index 97e716cd6..ae375bcd3 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -1212,6 +1212,50 @@ static bool llm_load_tensors( layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0); layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0); + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? 
llama_model_loader::TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + } + } break; + case LLM_ARCH_PHIMOE: + { + const int64_t n_embd_head = n_embd / n_head; + + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0); + + // output + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0); + model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0); + model.output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), { n_vocab }, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), { n_embd }, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED); + if (layer.wqkv == nullptr) { + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + } + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), { n_embd }, 0); + + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? 
llama_model_loader::TENSOR_DUPLICATED : 0)); } @@ -6266,7 +6310,7 @@ struct llm_build_context { struct ggml_tensor* attn_norm_output = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, - NULL, + model.layers[il].attn_norm_b, LLM_NORM_RMS, cb, il); cb(attn_norm_output, "attn_norm", il); @@ -6281,8 +6325,7 @@ struct llm_build_context { Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd))); Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd))); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa))); - } - else { + } else { Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq); Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk); Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv); @@ -6326,14 +6369,12 @@ struct llm_build_context { residual = cur; cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_norm, NULL, + model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, LLM_NORM_RMS, cb, il); cb(cur, "ffn_norm", il); - // FF - // special-case: the up and gate tensors are merged into a single tensor - // TOOD: support into llm_build_ffn - { + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { cur = llm_build_ffn(ctx0, lctx, cur, model.layers[il].ffn_up, NULL, NULL, NULL, NULL, NULL, @@ -6341,6 +6382,20 @@ struct llm_build_context { NULL, LLM_FFN_SWIGLU, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); + } else { + // MoE branch + cur = llm_build_moe_ffn(ctx0, lctx, cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + cb, il); + cb(cur, "ffn_moe_out", il); } cur = ggml_add(ctx0, residual, cur); @@ -6353,11 +6408,16 @@ struct llm_build_context { cur = llm_build_norm(ctx0, inpL, hparams, model.output_norm, - NULL, + model.output_norm_b, LLM_NORM_RMS, cb, -1); cb(cur, "result_norm", -1); cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + + if (model.output_b != nullptr) { + cb(cur, "result_output_no_bias", -1); + cur = ggml_add(ctx0, cur, model.output_b); + } cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); @@ -10536,6 +10596,7 @@ static struct ggml_cgraph * llama_build_graph( result = llm.build_phi2(); } break; case LLM_ARCH_PHI3: + case LLM_ARCH_PHIMOE: { result = llm.build_phi3(); } break; From 8eceb888d7b7f5e93d20a4f85ca6511022b87040 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Thu, 9 Jan 2025 11:28:29 +0100 Subject: [PATCH 80/81] server : add tooltips to settings and themes btn (#11154) * server : add tooltips to settings and themes btn This commit adds tooltips to the settings and themes buttons in the webui. The tooltip will be displayed below the actual buttons when hovered over. The motivation for this change is to clarify the purpose of the themes button. * squash! server : add tooltips to settings and themes btn This commit adds a tooltip to the '...' button when a chat has been started. The tooltip is "Chat options" which think could be a good description as the dropdown contains options to delete or download the current chat. 
* rm tooltip for 3 dots button --------- Co-authored-by: Xuan Son Nguyen --- examples/server/public/index.html.gz | Bin 1205858 -> 1206458 bytes examples/server/webui/index.html | 72 ++++++++++++++------------- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz index 36f9c9fe9a68d6843be38c7b798e29a950ebcee5..3640a7a6cfa76764d93684e9051a32c263932c8a 100644 GIT binary patch delta 912289 zcmV(=K-s_I%t^Y@Nq~d_gaWh!?Z|(=mR1$5j1VBkEbF!d2oORe5|U7N+cstpqZovw zu-wo0`F!H}l51#0V<( z_Vjx#&bQF>lqfMa%(hg2-u@eJ|J6dTxOqm;Zwn`JRL3j8Ha|V%2zuv9v3dR!{Kbt> z#&&ewI2X#qVZOWh0cM#2{^hdRMmk&)1U~k_I5u?0Lp*%{R_UJ~@;@HD~hLI7Jr>giqety{~7HpPxX;8c{F`nOUt<3G4q`Xtk zK{xs!S7-O20P^{R7`6-Gs6X;h<+`n+qL03 zN+^E6F4u~yis!v(wh~yzd6SA?+q7aM{QOdErRVZ{;;!Uw+g*O~d_@=+t zj`!13%n0;Hv3-0oTrR2~SjEffg1f~0gXSrW^e_+Zkl`I^+sH7E0K1BI`T)<{tIgX^ zGWVsB5hkJM=udy;^LH<|+sXoqLPva0hhK-4alzC-mM#QLoBDTp3D3`0;4kmthuw5M zDqh0av7F#b8b&*zXQ3DSvfOLN9#U4@lxqWd6jkh#UX~@X3k;hJ@%9pUv2{1^))M~B_eUz&k)1nL6iqJGu*LLF6$Pp- z&MRk430WFOw=o?WwMBW3Xc0fa;vau|-K%nJ5q&=9 z9Cbc+8iChdl~>t_z>gKhm!uCJwG3BvR+G}|h+{({~ah#T!FX$?O=Ut1&hA-1ZWua>7I$zl0z zzK*K{LO%VnPrO)`$fvEomSu-Yx^Mdp)yXSsqIz##UT(bCL$_XbSLe;|D0VhF zxF3I2FOE)Yi0&#jf7{x>)6Pfs!+C>G+q;o0ePr2x4BgjLb-m*;9-JTGr>cAR&bnXo z0Ed391N>Xhy&H;w2R?gw@UAHco)!EHTM_BO24Q2vuw6SJsNsLyV&Sal?MmLxSjr#fX}n7t{v_k{Efftb zDB39>XV1cU+PC1#{?WV3dGVFlHR6Nj?^YFg^9^kYTk;mhKX0^V{#LrJ>;|n!E>(P}`3&RMK=pqC!hCN3 z_?U6@2_fAX!|odDIZv7Qd_|y@+fHh{YzXp~Aly`TdL-}oz-1bFl5@4Piwo1;JKOdG z_5Ar=B=26!-fpPLl$|tu#CgZj(DI7PP06D}N&V*eY3uLdnDV_hs)Jec;exuqjR#?2 zI_ibFp?~+a?cv*M?OzFqg8@5oz^Tjiju`_RTI z&*@G(=AzoComY(gEp?+xvyqYep{{t3=sRsCB-^_qa5S^pTrYv$hWY}WDm5Jye?Dz2 z*>>L>^d*I-qmkPt=c+rI{l*7M!rXfbNb46T(2eC6f*|fI2c24g{WyOU;=5Wj2bthe z$Z)A(b)3awj-KBxH}7ux3f0`V()enMr~SN9y?T0iLW=wNL;7etJ45glE&gETd8t7M zlfjal{6b9r$Ci@sExb2Nd;3ay)JM?{rq~RRf}~;e;qs5Q=~wlqad45er5S%|7W}1I z@Bl-vhwjQ;@uE*yUWKuENFLV#%b^a4WB@?3%K9s$@Njayaq&F?R%5?e;P`#RI6 zs%%qtnOmjrhxO`?V&N@nWitq;m*5hsV7GV9SwZ_ict$?RPTtAqavvGLwE&tDd;fIemY-+Uw#9bKd7i`gU_* zUN)7zB(|PEYNmBQy^p%syK^(z_4!Q0Z+lzc-m0(q)A_+(a();;^xU`4)tBD2H(+vf};zB-HrCs)5{Z7I4OMErbNr9%C@?42@rh+uaqfc-b^AKEuY|M z`FZzmIse>W!+y#C)6cxri9F-q`?FxH8CB3~B{r0|doHBFYaTxS{A=o(veZ>IsXxDh z)UL}D*f5Pad5H}@(+vH^4>$YbDF}aGwpm7?8C!Y$+rb6!m#viI zB?_Y*89DJ8{CVRu`1xQWxK)JMxp*rJ?RP0#LU0v3P%<<;z5zE+tCIRr$pP`_r&j_y z(KmTVspvOTCn~G3yp>8{P~vD;HZZa@R=m4qgA@I!kEzz|T<(3pURmuo(krWdNU!Xc z`p`%F{`-Gwgk1C@F|;tyzrP>JNyW+c_TwFNm;F$*g4Oh$ols6{Rlc_q@1VOf8JVUT zN%Fl@k(~}+_T7{59>U9>IDr{{XE&Tv(&z-=+l_Y-UUnl_;=l>4@9ar_x)Ad3URduU zyzGe)$6@@P{V?K_YK{1N`|&Qq9jV_EPeA%cx9n_q5zT*lW%oSs< zQ#0R zjQ@X{U0q7Psy}9o@(n!0kG-AmRq(C;%kR9$FMMgiw}hR1H@kJp`?)}4H)?#r9>;}t8X6)PX>^yTgM16|I}`kZ~J?(_TV;}tt+6w(Pk zRPmJ?=HqqeXXBlJY``z>gpb#3#M;@_?Y)1RKj3Tn&tt0cklo1x7AN0A9Bvg zLH~_};iD(!`vyHH?}kSVd#*n;K5&_Ljg=WIzL7i~xsGqZr?-chjrjBPSNIA5_zFPJ zU(@h1qqy7e`@Z9SmhNm@IoN6bcfX{*#lwB65r>U_a@Q?<(Oglc`=O$Mhp#hxq#J)n ziTif%mdWQs%0D-b;djY@Pvgw^_uC!e%r8ft^aiG|8@XV3Z&qi)@-A__Z*^=V?S4QO z_HroFw|t$@Jg z5~-cf>3ITzolg|@>E!S|j7o9#UJ=y3tiB-8@+)#P_od($rQwZS;Op0&{`1ilYRSvr zdEVY~?A-_qA2#K?RnvnuYNw7Le=oAIZ^TA(|I}maX~W8nG;e#|d>CX${7-+&|4yBM zQQ|nv|8d^^ZA6mG=n<#OW*u7IO>x!rZSNsTVW22S=d|dd{DFWDH$fq)64%&u2`sG@Xp5u~=ye#?AE9^gS zRn&LcTk?q`I!Ov*Zcl|Z{s~p z+-Y#$s_e{F^Dgs(ARcT9?)uF>29>()Kl}M@Z(0Ff_yPZcm6UWVPonZ(QO>(i@7sm> zDf!F3>l*vQ*YkP%4t7uHKA#^bc+dCqBjEP{KjsSh8?e6T68ib=EIfY}csA}I_fvj8 z84pqNvw8U_%%;?1PP*?Q_#SuN--Gw&ER`wIl`9K!eq({4Km3*D_VT$+rEjjDhd_!# z-?Mf$@IQX(JcRb4m2;r!|L6bv-~aWH&){>X-WPB8LR^WSqdIvnoVyEt+9*u!6L;Ph 
zzFlGA8yp#ax6i^qwF`fst?P`Ztw)nzk?VcmV4>8AZ+1+)-5#Ekq$STEML!F<;Y;Lb zt`Qd(AV*WuFn&9WTf2kbi2{zS*pJD~OFj5p{@5+9b3XJh-%!afzu9}d+@%t$;2@-W z-uqoS3bD3@&%R4~;@51${}ZE>->cwcSwB3M@H=DqWgCZM6EJ_l4!unjx6{$RS>nsP zB})GAE{eQ;m@8WXz8L2#d0V;P-LBv5_1zBN@9Te5H~;beX4u0YnUw{JPy)SR02nU| z#mkJhQc&&_Zz}&d_#nCTQnE2eEF4+9U2~wZF}FDnmi_$nx2G5I`D@vk%;scvlF-|- z7Ph~PudiOH6t91Ut7ahAywd-oB$1&N2GOY#egU7VPvEH~;-xO;iQd*lh}WGrf|JkR zB)ID@ABu-ZmqN@z@G-_*@q23-{P&+eK{WsXetKFsMoxvxzyGwcr-h%2GLHnJV~T1D z((tLqycRC60wezK5$=PZFzSMJw(G=Eda?&t3!(A^B~O1_;lz^@m`>oN#wV$O^1t@{;ux@3#l`DwFE;geOm3Ho{;?5R9cKV4huA3;(~nv;ufLTa9T(oP4L?d(&u_ntS{=Ce#8&b!u;)${+^tK8 ziTjA$m!j&OOlK!eyA$tnSK??|^IP@xD9-p95zRGipA zeuyA{$m-`un>=55-#zQs^Mi`G?_t?V&F{MU-N~nSg)dy_o@w-zBLZxh(D!xx%8Y-R zjmGGEo@0*{+}6|kIJNnaijmg`eWzRXEkl3J&+=|IF2-=H!}4672cU4vsJU7664_y3 zJiYE_?CrkOJh-^tmZ!7uMtMM`7LnGAP(P>NFKHMSUKO8q!96ZD({~s*ev>y2C3gw& z!JF819Z~S)mJMyxEzdi!-9`IE!}EWfC~=Z&4$yy9Y351jk&WlzqYrB@jXyYs`v*#C ziC(@{HdXDmRTzCKRGY2{hxS_$yQRgAl*4xZfqfX?@s|mmyFNIPi5(tS=tpPiOj+pW zB<(%g?WgJFc)XmFt%rw_lG@9kK(GEAyqeybrNSfZCMLF{c%ikseI0zpZk~U&n=)Va zuaX-IStaqq7QFE6cB-3sMEi7fJ$}~@?Pa;0_u9*hErlqhUr|~F_S}U(ts+&~gYZ8# zlljl>>HhEM|2h5KX_RdOO)Xzyqi}|76b9WV6lEio-*OrLc67>i2KD@WLYGcpwq7)! zUhV*&*oYn0PGKWsx)P#e3--)GKFd_{lnx8U)OmUzx4 zgn^y;>f6iSG{59A33x(3*v=hTal72X9i3*ER?@Xb6`E_?!oO34&c`Lrf}mLI1sT%Z zw<{0EjV98!`t*JN5SaEQ$AK_0g$A{xhfUgjS~ohv34COL?`vf>05> z=*(@)wGt=pTS^6h!iUQ)n!weo;~znDe%t<%cKohG=Lvu5$M1jjr{{8%R{dIF0)cET^ zbtnFNoO=I$+Q%um8BzlH^i)=;u@Y&7^Gc)#eM-1;L{DKCtnJh z{Ph?=;yVq8_Go|pxToD=Adbh|p;MPhf1P>zGw)*Dt{rI?BYCgTf56FirBm2OecgIT{(R{qdAm$cpATpHC|^`Bi+BCoIZyVo z`14aC^4jr{K34js&)#?aV(r2L#g_Urgdk7*zx((RC;oq)KNqgbo}WMOyd9sTDnrD^ zHg>9E#fI^@xG5J>!FQ_J0UaY%6txu+hz;EK5?y@)K5f#TFE7{yb+}){MJ$X>4Q0+t z!EV}xzytKHK<`k;9{;{2Wz40LNA$?2f;GDfA+rhS^5tps&-2?~>*lX@bAR1z{n{q} zp8o@#xmkaK%I;XU@>i@JqkDHH+a+Fvx!R)Nh`7r>-nNy_GbJ|m8c5q_JfE~o1Z+ESGXmx| zBH4p2+9?^GxomiTyROt-zFYq~72H94)!D>@BYJ-w?*_qHP|>e;>U&$Cc~@(He!B?N zd^?zCVf4DSd|eoE>S%>Lv}6c-w5{D5+j`2u*7ox?*uUKcDWTrK9*iQ?U7<_7U*ffUAaOv?(@^;LUgF@wnfG@!}x&ugprt!~(J=RO=L>JY5em%4;JT&FnVZDEqCKQD>XYS?(P%y7ciflUzlG85lxWtEg z_rAj(s_H7Ldv$+|7@m7B+A$S$!&EXaeaHLEKGKiO%(u+`ueD;n z9%i}#`QEMkf6-3&o^OBOVRl-{2e#^8CY66ilgfip{%sS;4_m~1JzGMT zm+_LS`-z&!Rz!B9&3*OG|GeYl0DCRyqLwrwLu-@o?${e05H&+nAl_Nc^7+wr@O zVye?!a{6{Wx6nF&&s~wjo-VAiZRM@s-|zq}QJKqE=!Y78t9|cv=E_sH+^F~8vwJ1? zYmClAS zS4X7#ChyCYJeYrd_0<>X?KI;R+2_D-H$c$yj6IBZ#C9}P+oWIi@#}x9A0LayN7dV} zuYP*W*!bNzBwzGulZu|C9!ulpbZDPi9>x8pDm+ORbm8svQu+0@5O^IDi;u6b+q2z`3nN=pFFiq#rX>g?^DoS zN~tON3%+Z}w#>dA`P0i2zG+3it*R`~%J&UBBrfu=uNzuk)OIB5yBe)H%-=Th#NyW` z$ijvR^|E9BT|}AKlsy0)EiL0P-(_l{cb=(*Ugig1Uklc1^0I$@61;8R2ODl{vU??b zeLa{v7d)eb@$>%fv%$K_I+olaJ1=^UP&LxL7?$<jQc#?$=jKiC&;LOF69egY@NK z=(D$XJ)D3?0nK(%pG$aN(>M#XBGA}38_<4zExqR69@8fN`l?zlYGGXi0>Im^rbztx z3EW0<+*JmBV`YC2DJq>;R{O@vYNwUe?p9W;h!g)gUtgnwT$~7_!Anu}_F#-wrJ@Ha za3zq%R{va_%N!mkBMsx8qHFr+Z$$XhX+(JnN(=2wl0A?md zJ;#1^Y|=I%cs~?(_blx0JCz*5j=#MZm3#;I0+eha4~g(G^%XuQqori@>+ANZ{B{;+ zz7QuBD*eETZpSP5{CSsIsD!+n>wdg~&$lAe9}%9#k>A~5FYsP3_M1eu-w(=Whn)>f z;iusV0XBcFgwDGUAvgPf4)?ouZ_DL)+i6lA4?{1DpN}n5H-}ANBuOc;*kZAgq@^S) zR#qFfW+~0ij6E~;nCJwB?5dQIC~3h+DVIr%$Wcn)8?MZu;e`Qa;ilus20xcRpz=qp z`~jCgkn#sw`q+5E*?)xP56^j1JnzfK7&$G~AH{#hTbXc2v9a-jb?oQq4| zK4XjGe8m^$n&^$){&}YyR+M>x9zP9EaNa%;PEh}@J4UmWbbr@A-l_>;^X}MWs+n3? 
zS^j@k7nNUrH5#|&)NC-lB${pb#^1f-AozE&=%2JuH(qUTmb{%sLET@2m)i&OA+FlS zX5tpcBes3{f<_dsaBjnF%kNxEKxVC0!%8Fj<^MI&;=lJ0yd;^Lq^D@!h0#LaFYXsR zC=@{r?0qQLSmj}?e}1<`8+(#4en}PU-NJvqkt!d=uZP+rn3L@*?%i^QrpJCH01!b5 z=<6dOj#@B?c5paQt6})NaDn0*@3yB1I`wX$AoEotetasN_lI?!h3NyWGyG^Hc1#s( z-+L895CHu8aAP*!Wj{FNNm?9o?=~+sEMpnHKQ1d@4ZU}ZEehupg$KijN9AD%&k29r z$EUBnMiCez0R8JD8|(23qQuDb@Pl)rW{#KsxAXoM+dAcYe@IdzR=f|cDmC*wBmMmT zk*6PaD^f!g6!`jqqiMVPJ~#tx)apa!a-`VR0Ztu93q%MuC&D?@~`-`&#|LB9W4D#@MvkY{Hyqz@X9pYIo z`xx;oL)<2wr){`NJj=x%dug6SJowNogWV#Y4Ho*^qcBdD zAT3Ge4gZ*YO$ZMH5WJDXd)B|cJnXsSe&OHCd&dV__3a)!e!WWGLH&K6JZ|BB$D79) z*!lPKIq0QVkB?8yciEE{67Xp1Ng^Agw-qe3M{`$W*ThmlD<8AlL zcf8dXK^Yp8|0dgmUrJkkXJ>S#|MyE0Ynw2yn<%eEW;G8@6Z)l4wLsy|IzVu3i=@cn z4+kYA32%N@ro}fnBuR$6_lNRH()Rj%oAk9h#qf04ovD0_)=35;oqpY}g)N&5Ns1;} zr$IJchiO{_O=W*^Ie=Pnd)}zjtd(9xdt{R2T@YF^(Ku;iqNUKYewPFqM7lJq)d10@ zWlH*q&<~w%tW_ICf7N8t#DTaG9j#eYw5Riytk#8L8UWrPPlh?%uVbAc=({Lfn5L3^_5`_oM4M7*fnjPuwCA)P<j=@#2J*9k35!!ui>ZN=YMcizj!j3lwwQ$%RuSS{MbtWb` zH-rH7Drmo#j)rJFk-0Ri;R6XXV9BjjF$kNrk_4Oac1-tOSy?d@Y1an~FY{x@<&E6q zc}Jj0XSIK9u~aU5voM&XsDpL0a1O=_s4Lz=9s60MsuEx|HMH@3R*mM0Rsm%?G&{U2 z_IYZ8p;0==Ggn*d>)dM7(s&$(dfVmt)0&y0!Ah9tJSG@yW`NZ}LrRryKF_hC#Ug2- zH|bEOEwWlcp?SMz$supaUBxFL9Px;WxpW&cNV^*pjW^z2Sxu9*O1y=D`_o8e%gu6&$DINRvYWWnj1)-0RSE@{?v7i?zp8b5zf zfgw1|nU<2MD%JzXsczL(AMAqcTJgd-kuwO(sTECfGEGu*zEh|Dsm(W8sVB_jPSmn6 zWfIcXTutW9nm`X*eq8HYc83|a^?4OWRf5&WOO#2c>rOM!*HBaI&Z{^U4(4d+B>mKz zYkk{cEZH#HBP}2>&Gu?G&=1}9aB6?^iY5v`VhRx@MF84avgD{UNevUJnyu-q0=Gi7e}q1r3nuq#~-?ZK0!8-{#6?zue~PX!fWOm?Y6GR3E=E0a+V1e$|6S?h#?B}_Oc zxf*NRakTNdwh~bgG;HAUGO~^l_7jFhs44 z?do)%jIy-Wh>1CftiaxQ)N_LBa;&IG)dxC^LL`~qS)hq*3Z}v2MwNfBR1FP+Vk2#U zD5xis^%CHy>ZH180!?$>S4@2#4V)@vn=LD*ZMa=)A!wdU4CO3h0wT0UH}kE9p(u(n zZ$>;PP{TD-)z|f)pOaBrgfq$>)|GjknfFjEr=gB0O}GWLgkVt$NS~(a7?!u8^)%7D z)ivR_qz)x!WP8%@854i9)vbt&K`X{tB{Cy@Ni-}YqvR9cI?JF$mkx=GpO1p z2X0H^q-I}o<+VstMwMKOVcnyq6+{Q%5a(SvT`?<9)Y%Td$N|dpo3yZy+y)P+5tp@G z#Y-EQGg0_-T2)cR?rQv;Plu?c%ZxOb^3$0DPZWLR@O;Q~R6l>`S%~v#>Nt9|jY@5sYEOp+p3jMy;gAHXPRuP*EPXM;X)~jr4}N z9L(D;rBCWwKMw)equODP^eqXNjI}c!^Ig`}nAECU<56Oyqg*l-!B|5601&ve%X=ej zKGFU8fRU$DF0p?qbRI^^tm~2<)Q9J87KpX3BBX^yaCuV=rxZ<~R*)^)1)lPek+?-5n zK(((efoiQOhms(-oqWJY4Rz4MM%+vZ$X-oN7{QzcsJ`env!pt2aMluQ)c{F_=hnb& zIApIg&gM*S&c{pAWXcrGrk;@%*%1s$i>jZtZb<)!zu;9S}#Yk9^mVLvMKl9h0ZX02xJN-&K}0lP2wEm8aH| zq)kCnhLLB&6tdkqTt_2j3YudFoCZt{VfcTLN{MDKr$wGop|0pRpu9SaCRtFm7`QT; zn_y8sI8deP%Sazf3#cHiK7|k`*sA;QV2y>Q6sYFi{t5g;(CZg?3 z+4j(`1uiKjQ=%3$v>_m`m2iN}4P0a`bUOAX0d5Q(8-l=mis)%8N==mxdwefID};a0 zYOj#)cI=MhwP{4vQ_uLC6hMYTzm3377&I*Inyrm2NFtvFwn{ z$gc~2A|>>EZaC8sNDXH27?L0W6wqKOG93dGmR4lRh+<6_{%XNh6%C14svU$>-GY#H zTNVe+j+Civ#hd531J#zTS=W;}eTaW9CgP;ISfiH5VOkgsK&-_{0MT1!)#fbdv$K8$ ziN(Yal-Xnm&bv zIwBvBb81emNF+(zY?zXk>xh4g;gqhnfW83ENqgLv5q?fCX+d=B(}?qY3$d0#hn$iE zH-MUqqfbaStoQi=id!-!aKKoDaGQc^vxYISlqzFrDOm$^M#y{&2*)6&lF1Bjb1>FV zSU|#1epMGbe5=DJHa@fGia40HB(`FYTU<7Zg(lxg_?m$<6Ozd32HAhsgwR5Ad*$eq z-bb3M+sOJNLIu>sW8H?q)Fdsc)&*0A2!0H~(Zrlo`3{1#5x^PQJ$>FbY8ZvDmm%B2 z+ZCzp+5W0EYyobHsx7_(v|R#PFBpB!S$!>QJ8_fa`xLmWqjt9Fh|X|D^VqPL%YcO~ z+ge{1>QKl~kS>Ri+f;wMi31O$Bpmbw81*bXahZIoGkD9Gj~hV<{xU-oB;`@vxr*DoLod-bB-?17sLt!-Z@@#31fUvo)~}U}j7Vbt&Z5>xH}? 
zaZZ?Y?FgvTWa@t*=15J@hF+f)Yx9EmWjfPw7k+8%g$1uPj)M@ zz7j(K0W_^nSgD=?qlpgo(_qz@DC-f?5o92-`aNu9Q++unh8ThKx)gA$j#H;Zl1v=A z;{@w)#__i001U-+vdWO70lMF6d%ixHb{*RbRK$Sb8NYuB!w!YhJa6G)yH8r2En#&u zGkLe`cwN>pViz8>luJ(94aDj9I(Tbtz2hSu2jxbfMW1%+KXeM3;oUvMd__ByyPa>Up&f@*F-2+LGKK z4dU6z)W)J<`-qgRWHPq8B;sTFfZ+yXWaN*LQ8<5YP-vHG&ZfMBl4jV6kOaU}QJkwtH|tH9F;*6m2RFc-Id9R6KT8kmcS_pL1I$Z^*70M5$77j?=wx*1hAmvTNnKcyXA>L_e%gD!~p$iWEf#tifNmn~fTwhWX8mjY2G zwvzz0#X4<-jAPR{2oTA-H}PE>TXTQfDktZV?A6i<2xa{`+S6An*Ib1ZJAyK{ZONp@ zF4|ocTQguKh69JR*jB%#lfo>D?5Z3nLqmW7##uD*Q5^}FmPmq@Wwhqr%=P+~tXQkz zY%bKhiwL(TODU$ck#%s@>`kI6TYP2<|!vG(~;GfaPwEEKNZskot0j z0-QrnZFE54vg_MUh)zVDaRYyQ+41;^`vN$Wk7 zktKr2DXlV?^P)tJO>syq6Qvu5JxdW615a2xGiKVFVM{sjX~5{~iV&oR76niwErU_8 zb|?q-e0##RRGT*|*{q(LH9dx0$BUfV*k1tim5Gn*-C4VywfZcC&!c};OYz}ZjW_bO z-EWL-q65vjK@;oSnLfxgPF+&*vY#u?IyNbH*6DejHpCBeleWBA&9D|(vwKSuoJF~r zuH*$&SK4r$pwkQ&&;m)I7JxB4TxQmF(TvCS-|AScN4%A5n$Svoh{HkU1l zu7|5@vf2sVsmgz0LrMS@zC~5KRO%`u%+thLjCztARwX(Vx%ym**_s*GyLN`g?wp#Z zhC7%gN-7eg#W3|MwwmJ_&Q_W|4_|x8d@QX*hV#uSjgITOgG^~Mg#e$UC?6GIe%Z=Z zN?H?cwb`uI1-PO&@jf$Z2tF6ikZEns)Q>@ITQbBk@o7&91FBq_HWniJUi!m9&l-i3RFxL_Lbq$gN16YHEw z)saaM8fwf0lL{xZYpmZNR_cV+&(~h9&L()E^znbRzl!G|*`_+KLc^>q@w&7C#39>I z$%dcR<2nUcqE12Pip}DRXe{WSEr*O{bkz|u>>}|prK*c~4U?*r&SG(4b9z`+7H$X& zEz}|xUOq-g3lZ#T{8}Tau;)_jijI}$3PrQf#lo0o*}k)04vC)B(5L~Z=kSPU;CkFM zl*xaz)?tT?+@Od#8&!rP*kF}jW!1}svCoIouIam|I#@1=PQ5y)Su+4JBTA_bG0p!Z+bD>TvBb*)LqhQdWY;G76c30NA6L@A+ zNO47I5ZtIUG9opAX<~&^&NZt|#E}&b7~+33p$#si=2QY0himdH5W@^@)|>SJ#~ktL zARKD>JZw6}9z$IgW>{gZ;`IUB(?Z1_3O&XK+H$)t3uD0&DK^to05mq&-HIaueoy8!6KoLdqAnb-ic zG-$r|p>?WNo0GcW_op#BpAA^S?2Zg=&;)@X_u$UV>PD`WyK%GSgigZs=akvPjj>3# z_+=;#QL!82SvXxaS`uFawML?IZgqbUjq@OH=F2ukHrqljX~{$yF$=!6?n)TzcT8_uk$_5rXmg=76DkX{O<*_^ z4gKn{HB}h7*YauzOfOeqDsZdBHwIZhsx-3&F>j)7F60!71Hr6X9U+p4MvQ;8M0z5~ zTNob`v_2h(Bu@$sDvw>N?e`YClr6!!O6b5~3Imkobh|?@7$6lx%$uHSp^W5bazAN@ zo<{e=Zd~y#&`1-u52{PN&P*z}PRfmxQneZ2@N*p$RC}O!7mgH2}GHqmD7qWl3(62KV&d5&HV$xtH_1ghBa2ow&7DcH&hns4j4lth` zBNGr2k~kS>ZhuwD;r^($P-#aMeZ9%4h%Mt;okKW2WY^xHA&K+ZtWHkHoNqQ^&sg@x z14NaxwkQNBHjGSVpe3`#0_h711!1_X889idxQvjjDrwL9f$jI4CdEKNCX6HX39=OB+gz zU1vZ~MPhuNLQShm$Bushge#-E5LvEQulqpaSeV%pRy0iVFrQ|19|WBsFh?~Au-s^c zE=&pufSIER-ZaMnqoL^2sqf*8%jCjvUY}VM(ylN2q~)~1**|x7N}FU zLqp24t(sGy$BGT21yuSX*vKZBLZgaRr?4zvGQH3P=k@wvM#X=vaGq%$g}2ShkXTAX zBDdKJ9W}7FI5y=DMKTwL}G@j78itEP-sqd2ifsAuN*z(raf)tm+i zrZrbpSwWly*i$EVD&Qj38)6+{=mCCn#5bVf#BH_f9ggy-iVZI_UmI3JJoR}__9s!V z#hL=#&}s@8)LMUStrw1DJ#96YjoNI|0hg8PBARE$Y909XcuaM&=71I)HlGDy1;H=~ zjtAJnT$^yigBpIXYA&TQ1!U8nHtQ|`mf(_3yVsRh#2m@Jyv{Q+$B%`&9Z~I0G-D)d z&CGH=sNi}W4;uok57tIU?~m&e-VCCNG-ij94I9olv1xxFhzUn_TYR3i1$+=HT26~S zC}ZKe-RTS`rl!cLu4<&*ZF#6vpK%l|t8t5GyOuFiXPHPhLD^T+HcT>e#TdKQG{jl4 zTAQrV_G%OkSIhtdHFc>Zl3>)Q<9s$?M`Snb)rMqC&P7A6Pw@t|7!Q|&kyNkC5o==3 za5!6ZxS4;_n8yY(pCNF|;*!yf1Ohzu*{)E_pw&>Vh4N@EMC}@z&2nr4MT2Hf6=h9v z`aj@jzXeyOn+h-%W)Eq zh3eQz2lL3RwE7)u46%Jht2LO!P3V!R3K(0pfT-3ohMoCh?y#}3hArwn$ z)TANJBwk(^q`T1ii9S;5tM4}t~iCd5ae-7kM>d=;PffjR=5FKp_lv;$H#GlK?eCqRs3deWpj)A2<`Mo zbkEI3eL?Vg9Gld|Xl_ad<~AzzdMi=EN>my3s9_^ZXCqU9dM&I$4f|C$o+S9p;W8nK zeQOzyVGj1AursJaHseAAW7LjTqZz4xp_o;-ySzZO?B*mh$ePkKn-ZZB4m^a| z(8su$=bF)a*_@76*qYEiyplyorN3x3IYw#-t-x9htXZ?c5*WpR5sq>VzWvuugVlEJDNzZNj>a_jd?`7Fd4xO7LO_@IqC7!2<$}zu(A-BiZS$m zJ%eb6%z{&aR^9HnL)mtLdItgcX){enLmry4$a1Ny>%w}8fif*{QWupOoDU~WL_pNl z1m|al*F^+-vP{&KF^a-*?u9Txu4giXx75)p;xlsQN=}*@dfK;pV9U)SWz08{QBNNA zBR}fSBrRz}X6p1HW|&Rbb-mZ)yX&xjW)9Hxs5P2vHHVmWtH26jT!mE$c7S&!X;lkH zjda#FIMz^oszbt4r6#TC-UL%737@;;sXvUl?m9_EYpOb_s2!0a#siITJj{i|$()1ZNvnr{N-KOW 
za;?RfUi1EFsq0pjL|xzOFdaSaXns;1He7l|xN48Cad^C{nKY8L<8{yp@(~ndi#0I~ zdlixl#~{$anl*YX1E4wY1eD!XL{sn<)isLJtNBowu~T3+7Rf*l+k^=Z7^K}F_i&&a zv|VEugi-^yDqzp(I}zPafD9%= zbVL(GeZSpXvdd|=J2qsg1X0G#YZ3?J^ zrM&P)DIx)lJWrOqq2QTElT~0aTQTg&fEMdea$?^d1fwxj={3fEsH^C6!B0S~2U6>y zg?O!I;I%bKO1LW6p3MN2rs;a6KZ?ffs39b4)#)}`Yq8=R{CdK~c`fcv5m90$<2umG z2mS)-#A+9;q8wjqR>Jy!a@w0~vjD9VKF=A`>YycZeKWNUM)EzColvph)|0jyiHf>H z!dQcc0b>Qe=^BlCNKB`5B}mx$000Jx&|MCru{84#F!C}uGK6HEb)AL0l2vxG797k;WO*VD-2vrk z`cTFD1gH3Fdy?ic*V7g?A&>_WFL|AD7YS<+x-tP)%h6Rs<9-rNpIjHYe!;i zcs+GiecZ&sR1<-C(X7;o3Aw5%B1~p8bme;hIoH=~4B!w7Odu|+;847BR%w?+^lEht z#Er#bk@=9*nwh=O8}s=xjmfTCnT+byu``}8Ia`(&z>-~mbBxys5)bYvNJUSXzkn-}wCU_wesS6a%97ZU8*OB0Xl8Vhz7zR%@U zCmJuo(PGx4+ORaMsIwSYfPUBuMueEJ&ALe|wUtmYYTAMcJJZ$>$6CEwm`;as7`0v4 z5$u`PZdV5$^-23#_n-FiK5zOm)^?Qyfbfj#g8xQ5Skz z6=S3b%|==tj|HortB%|fbev48F|mLl+lgtwuP2@srK7>hS7ez7$Jwyb>o$lCT=ErL z*MshVBr)+Z7CW&D86=&9Cgst(hY`*)>I^UMnSERrk)fGBn# z7Pu|ptqq_3q)5UC9npVi- z7t&6xU@XGHlZr>*VwM3us0Z6))EezonzN>V`2iKW`eEx2EipK4sL12iaZm{H@{b~i z%MD?9#|0ohwR+~ag8n|rj#ryXknoL|88qRj;>a0$u@$Z&BNbbZwsg|y9XSE}37A;* zaRIrOobRHOpnY9ZLjMe5r`vjPqo7jHiQza*(fBPa;MBmh9)_-6PCtQQEf%wFA2fD< z#PPM;JF5{GR}RJBak+k!*w={R*oJXU!uett*5{CknQ=LdC<8Y8PH8~X*o_Z?9O%4j ze;|JHJ`2U$KEJy@t1ENzOY&>eCCg+uE7(Dvo+hrRne7UY)BA3kn-9k`p5Pv6NY_Er z?dNfot_=doaERga%pxXcuOg@{r(IBg!W&?{_mJy=eXn#JC8c%%pidMu%M`-Gl&iO; z%uT$5Djq3y!X)QG@+9v`m1AIDJ@9sX?gNxmJrS9D;&JfyN~U#<2m73KtP|FBf^8T&Kd3_(dInw8bxq|GSaR((moi!ImKKI z!&V#vY#ks*%SI&8ugE{1?6xD#loh{#oGKequ?Do85esnHuTaA+X*5N+7_v>6nGsvj$w z?9sfXWrFh!}2FVK}-eZjbBCG(t0TzqG*(~ZL2SB?nnz3pw{oXFq>BwN1}DBK6|6P?h3I4o)8(w&(%43kXwlh$ATVN!tCL?o2;r zWMl8vsM~!C++y;KTN8JVn#1cTr}Yt$dZ=`b=;Yx{j#Qj~Ay4J-YJsi}E-3CsF@~W) z$C7jw*a|{A<&dQZ5gK(8>rG%ef(N1%eYVZ$tCJw^d*=xdszS$jIojkLoI<1KAIpJU zpx03?%fS5_iG?*hssofo@ejjzev$22T5fKAKjbZykidg-yp_xh{7nGsreG1g1_`G# zmYio4E(6AYLa`%6{23h!{)S(MLaJFi6BENqh#TAh*7a|kG2sS`#qqG5kU6RQ~Vy+VR6^dis(WR(BG@_I= zdwrb#dbJ;ldcrY@ij&9doryT0BzmSIWBch+jgh3rY#@THm*Z|uMd&C8#gL%LH}c@6 zLA=yw2Fk-&30p~qOxllz46T=3F)0p%{@umC2FQ&P7Tw{qBX6h6Jl_>_!o4*&kKi=& zQBr7sYS8Qd+XvQS;vVK+Ynyu0<5g5)42m{3W_sKyky@6y?tVyRlK81(w&$6;*7vY5 z2HN*G&wt7j3tqNpQoAUiIJ4`D9NAg#W zldDtH);N>EiP%C|o?qHv0c1{~u2_||DI34ST`Wl4b}QuW4QxUHfg{^sBb}40nc~Mp zhLwAXN-aPs%6s$3{IFU`>hRlFZ#ZfWEceNBIyzPj2VDa(m7!0e0Au0qTU@n6L6*ON zPH6{`mPB3Z^`B}YCF*xQ7KQvUr)5f!5$icWw0e-Lc$lVvWVXQxLdtVPz_P@dExdqf z;Dsj5{TEb*lVBDapMWWX+|O3Q$2@B40(}+n z@wqd$_u8er*k$PI@KK{Y55@7X#dtm)P0s`s#o)MHde6)K8DO4y(3QGXb^se08}9w) z8oE|U(VB-8`OsF^M}3HY0SYc||6R)5JpkI~-^x|bsG$fALF$AgjRMe2U2??ZYXV22ZeUtpu{zzIx8PBBucUARwSghurA``5n|1 z{;abC;Ih*Qdm*QPeq5+%-DR-En*jb+F~W+x6Thhndm|;&DU)*3Eo5P*=zw z?Ly!t%|NeAA@6bkGB$yQE-FYH47!mt)v%Tf__f0PIz?{fSacWr=RP{g!)C1E-HKvu zPpY+lwvRU3qJsvRq#?hTlu~E$z#EO@{UnY)I{BVvtvu0x=v;`#`_<7!nYv`}I_Q|+&8&Goj504o^U4;P{$P=aR!dX~nn|Hd)8=N8 z&_ih!-6?7L%E-GRKfdjDkKqPE4IeE57zr#7Rba0iX;5FdObfLV%YE_HKx%r>iK6e_ zCyJ?~)EL3#`}C`6D`6n^2`+<&_xtE2(JPvWP?-*YqB|Q%qJKLOQ~0ZEk#%o1>f>hJ zH;0uimmhG(A*Jla{@4sSz=4i#FDHl9XMa!#qXn(hK&5U5c=Ns_-q^N!U$IYC6*sy1 z+t%Ujoe`g7$yq@H+t$_d;S_I~C~S2PiNK1HP2S0qPOxli#*Q#DWriR4oU3*t*IfuqX5KK#vcR|S^dz%cb;rpZl< z7*CJ&TrPh74FBQm>j7dZQGZeWOHaB5hKTrte9W_;h>*3_uJLqDD;Y|pL(^fmD|cdx z9}}ZJPWMiLzbS~%PU9jRrC-2~#On@~?on!gS%Je~a#JP95hSNbx5JTE69UUQl+{|) zC28K5^BTWQCw0XX_1BfJW%NXv--{ffH-es$$Xz3RuEAm)J8Qo-ydR8Oa}>qtEoboY zW+jopY+I5JB}A2JsGnlvuIgfKtmPvnBRfvZO@Bz-^z~;ci{b605oPUsy3jiFDDkF$ zlVB}-%H}a%FPTT046XWZAj>uYdW4QX4zVpxF}eZhut<=mO#kSm7bgC;;@U4H0e1M5Mb`TByQ`@- zY36LJYgbzaVw(9UYafF=2~sKfj6){!8#kQA^K$1W$fy%g{wPGIA*^quM>-o9&%p31 zMjDqdxqWu5q)6?=+nmL2O3c-N!2iuuGnx^Q+>I+#;{4f1gpauRBW@o#-_i}bj+KKF 
zr?)4gy)YjnYzHX>@9u2T0cYa}<@xX<^a)X!102O=a`w-1*^yL0^ZX|@NBI8L4D!Rshi^H=NS$D|1 z@{--tGpwxuzRup+BSk<$(0Xu$$Q|HF9cY0S(c<%2y-@aA8lHyc?NkX>ubh@_#egIi z;UH;?b+I@GW&-yIYJ`4^z4C(MG(i$B1FlqYtlFAaAq#}xT?f|*c{cJ+>fl;?Xt;}h z5B#8I`2|zBF>WeoO3?m)5Sisum3!)LDLTeFCnV~iLn1K0;Z>NdX^Tj}X(~P)$J&+X zqjCo<*@@gSyBU}DpUYMo5I29nEP9)=8ZzUllR99jc;<$}?bOp=rMD|?{u~)LX=L_V z5m-JfbNKijZv6Jg#vGt;{p4_|Vp}~y0*Rrh{JEqvE(ZR=>?D$Z!ND4UAHUfF4A8m7 zXtlg@N#84Um$cRH(_1{4)af0O7Y5cF42pM19YBNSME&pU1PyFNn?P6Dp*N~yx~sCg za_E)NyGg_feFTN+Rxtf5a~mT~-&VYqi}+~O2blv&g?39sZa8;H=ck_3{Uw9n>b#7D z>6xgc$(V0``W@zf7-pBVxt`MS4!#KaxYhN9h&(PUqlIM=Yl#Lj|CscqYrsE(luo=D zr-OoTws{mtJt0Ot4jvBUK*K6|1e)KChI0i+e*+$9K`SB`?f-tqwxEwM_Y*rp0lmZ~ z_S(u6H-dj|roo?CFy$=oQgZKUp)2$hM$s2lpvp^mxCP?cCnwYh5 zhhq#^q(>h~I_d?hANGsl&V86ZwUKfzrLf0npincN3Wo`|R_#xPmzJ267&Cgk`Bi)c zt4K#Wrt&?1K~;ukmMme46O%~5&7wQ-fIn%{&fCp*)+CZakT_}fn|D92O5yC1jo5Xg zI(zq~zMHU^wv8IkLt@8y>10YzV#Gg#j-?n-wp^vF&{SyW^hhOiER-C2ZO!S>B!H{% z)}q5mHfq)vDHR%OV6r6EMXDzJ7gbWY`T|B;(|o*t42pJXDj{TG^$JDzKb5!ncM0mf z_FJk}lrQbH;P4muQu)iOIK~L2*bv$XWTjr&aj&=fjge(gU2A!8epfKBa-q(ah<*<6 zP9^WrnNb#?I`49QK_+73SWm5GoIyZmeQOoHr(@A{O{ETyXSqT4{q_odSHy<(Y|I&X zNgJep7Ar1aMlkyFs5-hmk?xU;nx9NFIg>t~E_-(E`%c2S6gc!t=hOk950D>*TjJ;S zj84aGYs@5`wXI<#f{s8L3UUn87XgC#$z#jaI#1uI!IY*TA>jXw$+wNdI|U7!8HPgE zO^E`zOt(a%@LepBp}q6=ffib0GR$UydiV{0WLkAs`%ZD6ZrMnU|AV&=)hnYCFan^O zV`<&a5GmxE`RQb@$Qjk`mVYl@4BItZ^n(B$Ru&z?qadfRo{CS=8uL{6;bfy&Kwa*O zEq*rWHZ|OoMisfJk<`Hesh_$;;1aC!_1H5dAX7w1lgkwJoh89u&8$h1|SNCyUh;ua&WXr8kMc1q@cQ+oMQuBg+5oJYrxxSZ+fd z^pgkS)0m}&W6%J$*+&M24st>DNh62C6gAb)Mox!t9cx?5Zbx@=dA#f6tS6adrY#EC z7JTm&4Yt-4bMZ&_S~@8_M9cTQTYoWI?n6y5I92&d%)qw!i<>rOkVh~rR<5Iel4zOV zoPFNK%8JN-rfiZw3(r@`Z}K@AnLdIdDHsoYL5e(LM)|cXkbB- z+XkTRS?qxzm;gqM7ZwdE?$qv7lDqew{00Iw9+Vi&4&rWXaB??T@f=|Q^!O@a>I zU$W7HMWwFROJRpBki^f?Xoj`(lW?}UEDvSQDX$+D?lNpTXc!^$WRz(aBoi2G!~vI^`&j1FoBF=R}#ov*Y*%+umDv!v@(yVeQ*Q!S%@X{g$#Z(JRO(96}_~ z+k)a;vAps^#^4+}e&!!A(kI1nrKbq>Fq25`6{2bt{{t@t=c%z4dl4spVmrreK1~5e zU?s$|%rT2@k~8Lickfnu6_E(bW+{Rr&dx*6*^STefeF#0?u5N+XKrL@IZI&vqIcti zO2U*nv<5@?#u3ZGt`_|m&Q%?$IWuX%tUIIhj?txR{B)oj+w5^U`EKweQuArQ-j_LP<46Nn$axu7Yw%lIt5^*VV` z8x~H}Nk?cJSERfyL0>}?>%&)4k2eLf8c^{FoZd{=3|pqDqNLoBwSft_)U@%_IDW=4 z+2<$`6N*89B7XcyBz9rVVfU(?&np*-)bFcM+IMN!h@rV@#xBz-*v4pN(lvAHrF8yM4^>pu{vbO?{(e9^McUfLa6`FKE4SZRCxkBu)XjU-m@A+HI~? 
zchY84`6Hg{_2!=&7lsC--39KQOA2p^Q4&PAP!?%_EjDrbIW=SY{Qg<<&l>YF!2ruD z`gu^RFbkP;;4}yveIJi z2+@QUekLp=v?2!!)ln2Z(IH@#TdKfFV}dazZ#ATrD#}B!1jEzPpNBY`+z|t@n^Gt= zybeZx`8}kY<8At;eZzuYK*%hYcqW&wYZ*TwUTX`ZcRX{ab!SQ`n-Z@W8mCVMz56=T zX!NCgz7;K|)jGZTfY={1$gV0U+R43DW_!{Tu9-L^1%evCM6DRYc27fA8!#%ldKR2F zMAitrC|K`*8M0hV?e*Dq2@g8@pz%dRgvBv`y=y;8uaIiHn~Ab-c~U!}&AFc2n-)`9 zYSE;RjklL`5C2ZPb+ucb#vnAa!0d`iQH#q^S7>ERE-1>)k|wrO&;{B ztn!(WdE$E`w*mFi#U^Qnu6Lq|F$HIb2hEd=>kev2v)5l5A=>`4l33eGHs0WxTZ#OC z1%1P7w&;$KWYnKZPp;H3)(ZT)L~W`>kgImbQ<#nND$q(@3b9Z%?=o(2MMD35!sDs{ z)pHIVjwlv&8r|la(o}A698+c5bBj_S(6s?P1`volPIh11Y9&y3ul4|$z`;#<7k72<1bHs zHw#TxqSfh`I3ck)4r==0*YGxA|0>*W3mYNNS0|%Us(8*>%&!GWX^8t0KsiW%n^JX& z*WdnayhaMHOR_0yAoPSXVZXm>TG!G#Jm+5MtdVFb2|K_r>W8lr;fH(psLI3>;_~5> zJv}|>*W2I=YP0Id@FXt+Ctu!c3e|OqmSoZ4Su2GOw&&2})#@L_;{P&q{YBVec|HKM zxAouK!@auhXQFd&QPGkZ6}IAk2D!b!>|&ESdeZ@O2Aei4DX?2!;?zV9P=I=K%YNc4 z9#kk6eBff!B`BF8<|G|B4-Ym*-_@(VWxV+f0u(ts}s;a@o!P0{xjq*pwFgLu; zb1tVONkm%Cx91t$*DA!Vw01NNuiF57m*o2whh%|j> z_jQ&E^H$NCSU1ksV$Q(6E>ZTL2?!Iv<5lEAT3(@_->UlukiQAze|7Lq{scA|aFu+9 zWaY-}ucWU|(88lovY)AcPAv!T(Gd5}c&^mtc7Pl5DgeNe z>(Iy077}fSW@I&ub`6vFE;9Kyl|+lRCHMY@owA-$m~25mDBO!IIPOEJf`W%&uCRSnID!Z0@q=g*76iOd@=#H|zNzn*xQ~B5 zQ>IJb3a~4}v~s1FB{C7&&!sYSp{}JAPx?_@Fke*IN^hNU8%4hT75p^+vCh+zXFMtZ zCZAjELDSR@s%SfZT9PRuxIKA!ZYd-Z?`6DfzC}$=m9*lpE!EcHTOVMiN9ufSfCjru zc_$!;p%K{ma>`Cwc@=)qfYF`nu2|2G!&UF|(=B;_WPDtI4n{UI)<*BW7^n-V+-=QP zk-q%R3fe`^Yd3^(0$GX6cLMs6yC{cSge`NUlZ$=8HgK5YHzHkD?r03264$<=W_~Mu zZ4Dg_vrpi=c(Cwy@@H$2p!67L>4>ZxL5g}hZ|6j0h;JDO zLtk-r3L6566Jbd*CLUO*FeJG;2$7AJz)#$F>@HRU!ZW(%7SROp90m&tXjQ90jQKHK zTjg>#3mcOZKOidG8P8RlxXY!5bdC9_;s#VlUsGa#t8a{IQyHaW`~ATOVztq|4scYY z_R6s186!L6?jMNGT-+^GrPa{87N)lB0A_@k7vj{S6L%UH8=F2Y@Oc>Q^h%?7_09Ks7pSDk)6Xf8{2-#0qm)#UG7<<=NbI@4@`GqCv4qZ)aU{M-|W z1$t=x^|iKlWXS(Yx>|%9vwCjJG#gRt3PeV$VGgSz_%~=bsd;kRSc(rcJsTwC1|$-i zGNdcX*r}Nr1^_QS^IN%4_=hG6862Hz390A+C{0J@Z$lJr?B(OtE_6C;iJo|WQ2Cf+oFT9{GeBx^RMN?2 zXs*0h^Q5wij$k94X$)D6IuCq*ce=RyHS7HV0e5$Q+*)3uJD>Q8{${MeacYKH4^TN2 zAe?wq2`~~u(jt%F@3WN)?Mg>wIOGMQ@X#5r;*7&yEvR6q!*wt)B>48heymslO?Q4r zZvio00&cIxnI--j-#;;$%6)w?h58Z`+}pnO43!%Pu5VQOEyZ7iuCqvg;o8_zp0*E0 ze}0xLQ0XnS*^w%^c}bmJt57r9;YHXh#mMg^4SFC7>70vX26l>nX%d+dCJ-$C!yW8s}4y0Cda?)RWDP3X^Ieq+IY>Irg zhz$0XA?FWq18MzjY&!>)2>on(%z<)U#J+?tspa&TSfqZiKb!$XL;8)a`Y03V+l?yS zX_ZU3*_~R44M+uDa5rFLt`%Nprc?XmT#RtR!;yE&jfVKZ^DiNP1ku)8QLtG4mwjvR zG%bIJQ-wnB@f5{(z86M>Z^GQZ6ZpB-Gi*;ip-H>QYI$2Aa+-^-Q}~746v90vL7{>S zR_Pb5^hu_L7KiQi`8hnsWn#gv=Fvi)W-(;CC+F>v*L1#@tULcgic$L429wXxcIvJ& z^DWZIh^HW%bz`@GNj4RKrVX-Jgwg;`pG85Ja|$0Q6;f5(A1+HWuvl2WPA$>a0KE#2 zR|YDw)WI?Qd|T5w0@gO?HW3C&4KW?5_xHS4bk;5F$s;Uz)hCWy?Zm!|C z4EukDEkeX3gY9)f=YrG-;<~jGX2`}PIzc_+mM0zCas|69OLz(S{yqFhnkfL-= zKU+?XY152<6iR1}!{vM8MI}21#s(nfSPFzZJ+>v$0A9(nd6?HewRga9A80&D%k?Q2 z9Fy)#HshF1&O8D60Sq$tsbVIK-O9N zIqnxVM%nLfby%}zY!?Y$W6Bo=vWl**gnS!=!lHzKv)J%v!Y^($GrMcOyfE%|CZUTW zwT?LmvC~3%ZylY3Ii;IXT-&h0?m9w+T#JeTx8od@!0@6CIOSJQEfu!e7-&NRu}BQ9 zXKCb0SO6JQc(il@A7FX*MZsK&MxKM4_|fJ@yy(l(sIj}$YXxQy@f55gGicqPSeoCo zbYRkds?3>hVlkUCswZ=(h_Rj(YK8=JuKy}_x9vz?2p8a^1Z6vNQ`eN=79$po^jAnq zb!k7^jtXsEb+00js+u@C`^8MY2p9arD>L2mRfpagXCl}QbBG9QaQU&8x3W(cvoZ;> zi{#?Bj4Mr^zc@ahBlo@SB8oE&__w_*w=Y6}Iv*JT{z@_$b6y4_x-)7_bdr-Wc^mYL zGaFuF>Vcc)6WJ$!N9IKB8+lTs!5$qIpP_~7rlr672|DtEe517L;~RJGj7l!D*cf1^ z>|ob8B}dL_QR7h#b6U=f0?I^;XOsMB+~r|!*W_3peuIJ!o<+j2uH2hO%w>^5%kh(c z(S{E#V0kb0Ec0&rnn{)+ls!K1wnr)hw|eCDB$AJ>!JuW_LkQY>UyI$pMvh$Ui2kTc5Zum zyF>U4{Y~VM9W$gsEtCqJuIC^eUAr!SxKXh-+7-5wrv$VNB@cPl#_~jieOK0w%O-iH zQ)lMwWe9O6Qh|Rem1lm4=;D`+p(T&TeMVjxz;U2HSa$>NAZP%sB6r?PJqxJI)|974 zE{p`_E{_u+|1NX_!f0BIk&cA#iHKZ*)kBjThy`RMeJ#wZP-( 
zmN$(qa%;675~j3a!ef5?I7H5MMSEwLv}pH#v;_9C+Z8|agWui3vI#| zxL6`vOV6B{^}P;UMFdcNHX8o*x`XaPMx}#d z_GEqS=Fi|$F$uT2nt-+@_+g=cbhJfwPO#6~3ZlQCivupW2FT)?z9nZtqSz$M9)ia*;ys5=>F;?7E#n?8(N=2ulF;q;XLo*T& zjHL8vw7c~wxbUIF?eh|(G z;ok@cdPNLXt?xYhaxpCBF!3fx2RP0(WxxZ#G9QpO!9iDAiE%X$o!jmq2G zV~3Ybv5iTcihVmLUQe%o^0&%a4~%^!%}@v1PYc20F0WuZwTbgGv~VBu#Tix9+rSFft)Wg$D9pYp(C z6G_AxX@b)7(Ail7WvYXt!ci5>3mLO0zLh~pG7G>;3&YW z#*T5B!;n_ezG{k-cPVrNh$$E8S^ivvp?(1LRf<*EfG0SN8sg@&Co6260QnKsAWT%q zL=q^8bvy@-p*eksr|#zYBLEoc)f(YK3ltwEAS8_Z>M_-SBW@`#@3>Gbg(QZXzzVVX5pdhW1|)#mKu?j}pP~&4e{$`ubNfeWS`ovU z7VJB&1fmFil264ptw~T<+E_@fNki{H9kEDg4QT(-`YU5CXQ=5~!oXqU_2f{xYKv^W zhqP7wpXPdxj_0Wc4A0VGM)CV{?ADzSyBfQpN zAjh3`rO&jD!$WJ7+T5`1DLY6wmkFg)`PdJlm?^Us;2IxT?%$H(f%P_94@qzo#mVUu z^`x7B<7~xSN`4K2{IHE&XC+ttv&$STPx`apJbl^B1l3qN`|cHnaDJU{hTf;cAh3#@ z-C~9S3(Dz+P?btOl3UbtfT+%-UXV^l%b&Z4m6K_f4&-7Mf8iJ0_HT;IZso2F`%b|m z_RzCVR&cEjz{GA!3ZfxXp4G;m9Y9KIyK1 zbJStte*@`b-dGw?#JKrQ;d);QuEjdf!7TZec@ z!F?n??Pe}@P^G*4n8MeXxZ^q*Z!oXbW=#_Lp4uWh*yi&RN6q&N8wqTLbV{`<1<92W zY)yCtSD)F;h}z}-6|gzNjqV4KVN?r$n^5cT_K|Q=kx`{YWKNYgTe)K)gzw^F6L$e} zad09G9Z6U(<4h`+7rE3ZvqE&ACXVE!XeA(Viw32`#Zle+r+?bpxjdQ`b_o*qLLQng zfXm?MugKz?koLTET9Hv-1iS8++pK-8##M2y4fN6(Idb6lj22 z9??HA{8=x)-V+w1SFIY1#f8XKg4aT`)Q=yA^ii}b-~H2Xe=<}}VaCIM&#zQ+%YjXK zWTBqDS~3r!4G|MTL2m)V0~r|~!6>42D218vg^!-}x7b!l!o4qAj~TK<$AC7;>&`}J zFIBmI8^yQ3TN0NjF-L)&(!uN%-NEdid1=|>d1lgEm2OZEeE;!v-8vidy!BRsO!?se z?kzzE_ujiV_ue1Zs}J6PFMDJ_5M=6@(jdqbd5HhSi4xC7;XPl(d*(fPvLmLP%mV)` z3y4^v#zX1f>>dF!jP`Fg_2BEssQ_237PjbC%tdxjS-S>4xqn3N*q)n^#mw{vhpU-r z_7USD(n&a={Tpyg2>A|K6ur7+$si5!=rYV^=g9&iZLcQY1I!A4#EC4*Yki{j8(q`X z;=j={@+I|otXsw&zY0l8rs_%4uffLJOt>s`ri~_H+m#{4kI#Mq1Sd6u(&Q+8b|Q-J zUCV5bh~WUe?y@3#>4Qg(BiD0_I(E=xCNk@A5%|KWFcsq)eQLM4sL%x3I5Iyj9`sE_ z#4RjgTTVa9&!07a(aWsYa*F7piOPf$H#jTq3tiX7Gr}UO!ixdlV6cOO6pb`0o z4Rd;U77}R*qGSp8>LWw)~s;yS|}o2Zd%y$ z+c+ews@ZZFjN=U}v2|ETU48_htS|CsD4F^4n%9+th@b?ZZ_}?qAC4}v{PRt)0}m0n zt%ko1u#J#^WbII3$MfAy<|<7APzTeuU{O9 zFFFLjo}PbE5{wqeu0RB$+%hcLr9rflOaeTN4SmXZW7>uK1jY!&*+quZZKh%Y_Ke&LXOaci4!{UrD-}-o4nnBTv&iwgkYN=8?BxB!$X1bpq_MY z=3^imROf!VO~K6;t#E-zY}RtmfnR21FEwC;3(>6`(@jCC*=!JI3>iRKysz8qGf7p3 zL;X>6_I$Ic$kx6sZ|K11tQY9aTXKYSOcW${xsWtWoMPWRsa)35Y2(iXiI|NseI@7L z$BqbpO+P~?Z#(_u^N88+o6n-L(Rn90QokQ?KmMHA?T?L0g=m77lN#b$k*L}U$YnQF zkh)t(i|a}=J2thtyFS3gH;k2Qy}Mu^_oW3;`Z%sogXT0+V)E8RUHYnjY)nO(*Q4U}SY9&bmxLSWN`Aqmj3P9N zq~*TLdFJYvhfI#E8pyLS8`f9Xx?Vl--(~ z=GzGFJBHds*`!K)Bw`=YqxhRHGCVp*4`mu9CTs>NYS7`QXt3Vxvt<>qhoCDI!g^zW zRg|fs^BO7Ble?)y=8c=uhPF?v;sX({;HWoe(?v06WQT=CLn3M66q^9@eks=HL8^J% ztB|a0KBlL+`-`C*{NNn6&;4>!Bc|V;P3^YB>8Z_Bu-H=A)}!zMSiEy!qMLTB$w#a| z#9XNZX>wkY+#Zc!3w3N;r+^jqw!_4KAue8q4Fp#F9Gc_HzOVE?LvDh7jb7=d^_vZ|1S72SNi;NT^ju`b6VRtbHK zJ6aPNO&yxX)h@_P17hX&BgjM!IE*7r1m#JZ6$d4_pjn)hpkKD$~Y z5(*l(UbZ+OvUZNMCCi|n1#^BG+J%d*Z)Baw+<2kUF}N3hoc?B#^9y;us4n%5283Vi z2Q-LL0eIHAndx6QHaw82gtqzQwZD5KBZxGrs z5auona@7MZm?Rk-v7rDLe#Y3IK^%}$VU+T`$?oPN8moX6fLF|FijSCoAEoxR{o6z7+b|tf5y8{871zbVA6)-0KYf`ogHp z+_T_~BQ?xs8~yn&v$ zDTRQoz%onKxf>d=B3)lz-=eE17nR(J9=*f{k)>Mk)RssCNk**P+2ElCN&fm2+=72u zap2t$>jIo5T00w2E(|S66zx8B$g%?)y%Okb2QN}iJ(3|TWoF8oo^(+45+UsIWcg&Ys&!`Ckt5_74GzYF=bPlWXwoJ zQG*3dq(Vz+LoVAhOD5Xh6$D~MjPbGW<1~rnQ^V2ZkT+QJR6KHP8z{}I z;ioActDTK}^)ViQ-!KB%pkpgaPI2K7;Fn)`D&PI{h*RMX53BiG$As%y^XqH1qiiyd z_Z$xF#_@e4U1IEcexmtvH(P>*vOLAF03Rt8*xk}TK&mUheCIno+oUb?qUqL9ZA8_5=dNZ8^nlAgq4OP9h#t4}3UE=3L|`cz?WqwOuUON`^1lc7|Z5`Lzq< zdVFb$hZ^FiWa9{(RG@A*K?3x{Fg`lPg|nI{7!&pp$p3m9>+5nh$ed(|sON!t8AdYR z+f3vDKS030pbG_p-G(scp}_mH6n}E)v0t!LjXJDMjtadP zF+1UW&iqA(-3}trf5PQ8SplQNr`U6@JVHBfWl46dXEu;yBVdc}D{+pM^#^p@9=}S^ 
zCUEE-@6yCvXzBJi=^Q+V4jeEwT4n~z0g35Q!P^_6F(QT*AsO<@mMjekQ>8!yfJP{N zVaJGzr@zi+ep|3W2U(f3>pHpC&@5M}jYuJ{-EiSRP2k|nHw%EPS`)j^M2~H!F`XaiM!Mhe}4dCq+#$6OQqc9Rdg7l(P1>m zjp^i0aS+r-A;Al><{%#=m*N{#$E~p+CnCfhCZ%6C^7)~jS)6B+(2T@!r6t=;d7kG+ z$OF;7Nw}DO`b{TSO*LSRjSn-yL~xeB_#+;j>-F3KbMh2n;_I}Z7iPp15870^E%DGL zQjf3+f7xLlCv4IN!>J)6B&aCZuY8fm@@O>OLGe)Gx1OFDB`XLA_eTH1_Dj_VK4+yY zUk$R`jXd8YbwVy3M|m3L*W3fbei@{w&RYE0Ng%+{io_ZVt2@)N@-$RLDCLQ@HVGs< z>1>=?+-{&ecd=$lNX6%W|9X}W(ysn1xC$sziU-`cjWwW3~B$G$O8KdC(Mcf}UGa87hpv z&Zq&;%3orY4*S@Wvi`I#=rJUl6l* z0QlwKQ4I~2^$@|zu>Y${;)jssRr{v!e=|PC(~H!C+R50f0=2!kSpD5*?FCvo4N9xg zYZ-Xm=~lS>7oxjFfS=-*mMmkX(i!6yARsxUXC_v>$mfn#&O@X_gwmpwP0F+JW44aX z7#?mauYz=0Zr`ctUjg4r+uBn)gOO+Q{MDzJ+pefImbe&c;*BLE_PsjgJt)J{}Jq%9Z5kb5mHNOuzP zIG8ro$B_;3fX;0)klG&B{jlUBf8GhzZk#v*69VK6?wkG!y7{yoe5J!UpmY`HS-t!z zqg~+QDxpLhcnP9j;=RhAeg|q<;7EP3rFpLK6;Wk#-kOZ1=OGEv*=yY`ECBtP`gK=h zYOA-j%5L~mepRghmQ`aF$sDy=+8J`y7FRFDX*U4?=99&Tra%K;I<4Zvf5o)G*Qk)h z57*6I9gliB8NKkW)39|LH1ZP4W}^}XBl2)@?054&VIVO2vRNt3 zCC)a|Hg~k1Uxv@4kPfM;p3n9zrfGpz<0&UE^QjtKycPM;_CG7Yxc;PXeoh7&VeQ!x zc#dIiDRvtq$sC+~YAxnQe(>y(XSmnTyP4(9u6%+C=={9t@b~uhSj&ACLm6PNfz2sNy3E zEnL-T#@i_bfD?j&Xy%@lm&$o=|Lom-iSM~ljMWqbQHn|@(8Z+XR07~3@ke*Q3wT~!Mxw3+uHJA6OT=^ zumbx0R__OX*Uirje?k}ou;VoEp4V^$^gT3Zb{CG4v76lgB=0n;cUy0|T}fDmLUK;l zyCXe)Wk`n+Nv#*{C4qw1+H!McurYtbMN<4PHOC?t4sw-_;SjQR^1_scDBjSm1oSo_ z)4D3rElU!8tmeCRb*Cc8^5P}Kh(%&zv^Yif6ZN~HB(&@Me|~o|gF*bNiL~pq2Hg8y z*4-Kp8(ONsOyXFJiYFUxri}6#{v$RfMrcwbEjpjQ8H<|q2ca0h>1S}6JSI}EqhU2C z3PLHA*WG5-kGCOt?gcby%l4xW3LplrrcW>t5$MjEWp&(SDzj-ey-@QCUif&{N(qNn zl`qI$>rUu)f9+e=TpXtA!*p#V$B}RpzQ7A@n~29+c#i`B$i$0)1_m zto+iFfIAejmXU{d_@)gW#V3>5!ra|()7*t_Bu)Mzf0HtRdHiE_87`ft?)2*Jd8|$q zM^k()hU5ws=lF|V0Kg$h4q`g!I%Y5YTL`i!;2tJ_&>)opiaIRa0o@nW% z$6b>MvNoFb?~-qHW1q8b{dVGV>+Plx8^aA|0pd~eljM1JZF6bm%h8_Sd!~4?omlYi z^zVqpO@T*ogiIhGHWk}eb){3BHY>+FDh`UDe`QE4IVSJP2s$!;^yJ8}{Sq+A!_70Q ze()W|w+gMXCpSQsUrnvG9LB~+PkV=BihQwiVvQ`Z1|$zI=`4NsdG$0wOrU|$X>oO% z@p>koBd{4$R^pXJ3m?0%epJe+l~s)+o?^7R8X5qH^@##x&5>B-ky}yQYSb&X_$a>DM$AN)H(FM?8;)_^uo~JHfs19%c^e@?qmDd4!)Le;Z!BQ0oZ$&?NqrLs61% z->WrN>v)=;)CoX9mtPA)cy!MyBb$LQQatW6h?5%WQeTQXvwdA5;R!9oCrUUck^>dF zEeHBEW?KvdQl`v*n_(E{udrtlz_}j>c)H^uCW8P3&$*@d;}KF3II*i-RuQwM03T8)xe)P;VxRRj$IXltlR(imiUNZ&*3sJ9B0qX*;`-f-8~5z&L_J za$7vj{RA?;d$Dci1oWbI^oFk|cLKS6=rLv_4}uINtv}X(_+v>SWOXo>e}JHN;?a02 zuzcJ{EYjbTs+93t*{ex^-{OH>KN%&xJ9|j{aexoh!_P6KiMF!fGGJqu&<>6-(+s46 zJirw}Wa)70^&e~;UXU3&aoBreJ&Ja1W2k=$akj2gh9XUx%cC%XFDa4CO@lRv(U9%C;y`FWe=H4i{zUJK7R3tlooHh+=fFm{u(%$ z0H^{Tek%JH>;3)=9F4zHZO8;sWzNTx_GT#adbmD}4tNOcv*iT{Yo0O$b<3A93^Ywp zZ|+QAK}GUe<9>Xefi1dh=YhqB9Oqm?0ViivHe`jaV^^XFKaIcpf0#K^cZB?@14l{^ z$)(~>f7tE$ke za;l&py<#Eem;^pNf9XBcvVIV?H?nD*tq!2q_I~u_C%Qk50PrT@qLV$9bUs_X$ZJci zy?K=lEH@tzZC3%?)b!+5<+`lRoCv~n#k`zA@ZPufQf61ItIa-q_YttxoKDDQxlqH=~l=a9_CX3>ygpg_|H0oJf2WORbf4?aRle$JBWq}gyjlnU3 zkZ>MI66pIpT8!}2ErQPI@kH4TP|O!v9e}`E#2~aHiNm%G0~(C#cD?d^5Z#wJ$eL## z-tu+xRKqK9D_xGw!~?@g4u5k;z9zBlc$@Zh4X-J~ATullu(`v~Rfs7<#cW=#+;U(u-Iy%)!c=m@yy_5-K zN*?;|l_bSjt}6E#vwd1i^xneap13H&^tcJ~t5;S_ww51L)gsVN=3Fbjm?c7ud~&`4 z`%`PyX>T#xnK<+1BUZ29Ppa`f8Da-!?pWz7~MXaX4QYUI*vbCZ`)aj z@6Sc`AY8TUc*#$GIC3gK`jF5Be8q9aSh6nH&k*?N#{1+2IxpAh<4+l%F)-k;7-8`|3Tj86j>BWy z@coHlA(Kh6jU6!N246!Y_ZRA>^Mm>M)>sIn3okB${!KHJS zwVSz!sUL4C5!|*67@f7!RZL>OsLeTPCIi6Uk&lFXoerr82`M$+SQ?xFYpolb%9>s@ zNt(C1AFC}uM?feN8AcmH8c6diWc8sy{-vBl5s&ZMThTRQLG`?GmBIn95(_(ie`#mm z`4&*s6~0rT?V+^MM&5JD_fBdrtB+3E4Yvf%F%P zd18r&!}V@CA#E|5>P*&c@pro?p|FnZS~8jcROwyCFB@=P7F)g+Z#9-{sofQ%b#aU1w!+j!b4mfGkQ7pRA2R&Lz#Re|zfZsLB*N zh`t{8lHB^1dr#p{YcgxCZyUnNwO|fq_g?843YhFNe;#;KcO`2%_j)o+w7&a_#wEry 
zx?0WUwKq;1mi5c6B`riV)4k^m`Ieh>e~hIzPoK>U%}^}Q*`1w1XQFcf90Pp7i=emW}lqw%X2NSoU5)rr^Bomq-=4Dsqryw_H|?%LqINTF2HoM|!tQRV7sNZ6*dKqEd&qPF3yJ zFluAHfaK_P<G+oW?f)Tf5QDg|N6Ij*{AIHAOC(Yb&dS{zy9;w$Nwm^U-n=1GXKE8-v7?C zqav{XEFoHQ8-`{6ilQG3gZ3?q>m!|)EVjsueamNXIvhGAd(Pm)jmY9xMDo{8Ee zjRb~~oqs^&`w!F++`F+e35NcZD(1ggNq?&Fz;7RvPd#$PY#D~VjekDB{3)ar^+2N& z+*a}*(EqKNfAW7yvS&la`6T{QhhggWuT%Pi777s|Ya_jL)Pxv@1^;Hse1AiS_8FOK zmDxA_XZ_%R^y~j#Y31l@L{XVr5jkOl&=~<=R4x3@R~4+nQgXrnDN~j_W4dMIPtM1F zFVXgauODJgXY<-qJ-o*E_q+>D5$@3E`I94mjvHU!e;75$Ip#0lrmO;mfU`14_aqo( zH$J1i8c>%jAzPvX11zGTtqWkj1Q?QLqRc#@t6beL95?ybp$T{qPi#5xRG93hdixlV zWRTHlL_ih+PkWfCXT`RIK`I_is(C)taW+~OZdz;UN()Y$Y^J*7LnX@>bRThr(wJ++ zZkRy?f5F;|xd4-+omV`!b{IOnrggYTg zOd3&cW*i69(v6%F^i&};%;w{-mr3&cw=%jV-rwUhHDfSE_oB#aIQ$jGT;pLLs|rKQ za2~I^cYz9J#*=V`lMRaJk#15f&E0I98#YQAf6#Tf>`W8jw#ZC~p>HMr?7(RCJ4yFx zER5HkM8K>w;TV&yf(SAse?|f4#7sgujG}WMx3Ttl zHp6AKjQ4}9FCYL!x+kdpwPp7!$0N*6xY)vCAM;^vK}1=LIv3qN^r+Q`V~_BdlI6tx ze{Camxtmc-k?C9j;IR1BzkC?9T1F4|<7fViq2ue2QVpwPK@dwIBLU*zJ|#V?@MrLD z{RjJieuC~%NJ%cVXuvSSmEL!Ev-EJN4Ri<&;&;Nt2+&~h<)%zMf~R%$Q>RzRx4%6I zkwZti}E{#JYopYxVX+ME_m2hw<>uqFZ&K zrWff;DJ8Tx$j51amPaIL4C0f$$_Frsz2vK%(`X@}xDj%x9~wnb*ljY#=|7L>i56sCuyir$F7r+ z%!huI?~gQ81*7*Bg>oe4DP7-jLa};v=Nwlguc{Qe>W|}<hqK;PT#^TJFSHt_NiI=Vcri?B%X_>J&NXTzS;~e>t8J>5#{T z0;7tUanq*ZlA2xy?db-#J$yIAB-LWy8(2LR4bTOs>a50WbbVSAXtqg=|J|vreWyz_kk&{B2n`^2kJCZ2-~Cz=WDk4f6otpt%~_VlmsrI zJT9oq_!fFAV~Mf+0@TjfZiqzM(e=0%VS*~wI>@(C-+VE$Bw(?l9_3(sq6|;yb0-`O z>A>a6)dgD<YP~#8=Sg8i2NyX>0uY%wUn!O&Dd$Y;%~bP1R(y{d>E72;wd)x-5tMniB?9rQH~ z$y$OQbpG;*ffwYo<^p<|k)iS7QJ%*b2Gm|Nk51yc2=e}LTQo?d?!u$Vn{TcK5) zzqL^_+(*Y{UW^O!VUVJS^EGT=G}U z8R*9loM6s#qUINp>Z@nF73`W@8=*Wmu`ht)+Y${UFrP$yD0!yE2b!l<(R$e3l$rXU zWkimszYOT3e=9eolDb{b=RIq1&`gBkbwKaR3>qEUO7oWikG?lSc#_Et9n8 zR=QF(fjtd`Lrlo26>@KK4Y;}LdjAlGS-x67vB2>t`eM_j-cY`go`bf<#f=5luEGAKCNwjOh54 z2o`o#1G)pYT5xJxVB>UTU}Apmk?mlTx2sVWw{eFh@>FscnO8t#1+n&ch4BL{PL^Ed zZ<%u!H4T_li&e)jt5O_}MLO(lm6&JIDQvREd3H(;LoltCXaGXa3MPlxKvU#aL0}iK zs*W!$f8F?%8fmKX<0@*U3Et#n8wi*K9*$+J(IPik2rBcNN`Ndx%UIt8O4qq_e54hq z&odj@XYz9OkrC7`dn49|#t27gv^f+gp%+d@L35!Qee9%=bi%i2D-40C%REM$<%FUa zT^GM+!EMv)KF}XO*FM>m4tn*5>`UeAg*Or%e^S3N0;3$GD3WB@?r=QFb=3eY#4Pow zN(q`{B?7BwNvh=A@-0k!;o=IO4Iqf+j5Wh7?nc0DnMCYkdHVqw4WKb$7$+wr&{$O+ zQ#c)uae5YT-!wXqel9ZYl7kItyy$)s!5VJ6P-R!otH6Y0$ zx5%Tl?i~wP1=iiHFMW&dBt-bhOSmL#S%qQjq1%kXQ$>flz72BDQ#`u?}TUc~H z8nDRazH&+9*X!zL)Gwby@NQ1lNr~$b%BQ5Ok}L(#F6_FnsFwyf^6I8McnGkge`F8M zhapvIZUTb|tn~>L!`QzR34XKfWX?p00(`;vnYEv|A<1N6PQy)B$Mm`{fz?5$ zF-O#>UJeJOmj-3EEspL_fAjl(;nSZw9DGk2d&u4~dX6XuBtdg#+`KT#z zc&?}N`7;N(1W_BoC1()>i{G>(+>~n(a8-f6A6Zk`YNL4}GAX z;=-KXdqqRs&H||(+QZNndpvmN3C_5M`Q#8qaRqmN)1Bc9xLcn&5F5jVvY1_e=>Iv09LUXf{BVc4&!MQY>cXzw;e||&yus<8)}fA8D|IrZ>)an zC?;Z;AEn(#Y!((uqcxR2m~X~tv$YH(ynVELPw(0<36a1?BruqQoTDNL#7OJjNz zFm(4c@A z5|o%$c3+bp?hxVxf)@v(qy-*~C!6y(ARemM1+MRSpPVz8E;M9cG8Hc+QT@p02X!_y zGc;Z36bjVItZqlEpngni&oCk`wdCn_?iPlvO3#AnLG&{TPo4tha`p_r#%^eX z5EvF#e-(eaVF|HTwJj^)UK0R=Nyb zC2j&a^aTnyKxSMInqB-t2y78sGxqq1=hV~w96Hjau0^fiGCt%VUV6NEX{Zl zZg)CRPnu3KBy;XZRfy7ZF(7k8oNEc_wsmCY!7^4BuMV2D|NXP7UcL^RfJn(Qux?{8^tC9*Q| zOm~5REwb{q6Wc;U{F;DF167{(-H5V=1h?T*uuRNpfg<9Z5ja1~(t<7xxJTNq3} z!3Cs)trCM0^0W}9bY!GDf5XK`@Mk4r+QZL#C_|1%(jbAtnLzQI#>|ako)KcmwrWTtQxuZp-|dFf>MKBtbc58lm8*;1C6=;R*-{o=|PplEP`oNCott&<9mz%ObcBA~z}q+hK36dE|dv`fV;#@FvK zr9vKqrp)NX)~=-HII)n0D+^y~d$ELouS~M$mAP5VcszBif4HrptqQntAe1!F7qbU= zeWZF5bdR2|jbS_;t7#wE7RCPf6PAA6_H zxnU0>H9%{bX-L_2h=stMyiu!m0d~IGZS~6QBxnf7e~Jfv&l`q?IA{S_ZeeS(EcSy( zRJcjiI+AwoUw`_iv5Nmo(OD!}2n0d&KrC?hEOB>dhr2tcpO_o;raP*$GV>kIEkf58 z5Hb2cZ!;(>WT1liGd{w=2EM)UK 
zSymVee{lx;#htzuz>8gOOw2v73?q_wD$e_cO-vgJh=`MywnB^L+zXA)6{mD3umit$ zAXNtYO}XAWwmu6l!L!4S`&Q1k;6FM5JQ1=kXovJ_EdVizD~y^KX7e>iZ5*}fw|0vl zOyAx%b=b=q0@{G=XIrD zFq5tE^o4`$7rzt1^5lj8yn>Pliesjp2qD#2&M-01$JU1>TM8E+;!5}U)Dbr3Q-Kd{ ze~5B$2wog4<)zHbgVyMf(dTMZ(GiOPfwk(rixk@my>M8EKq`6VObL@xewu=l4KO0etxQG)`&WA4Aj^gfsL9b_BF&6e@{aNadqV+{4jlQ)TqQ>MP%O_9(>?`zkx;VtJd3)B35cM~5WJ6)V^#fhfj=&P8 zIwr+ ztcrT(TS96Z8e*XEK`5Cpo~}hzk^SzzYHC{*Sra-2@Oj(bHj1@gAw{~Cv~FF_Z<(7% z5Bv#(Dh4WZy!B;sw$SRKf2oZ5G_w$^KmUjvISJ6=r}c1in9(#PcP8l+cav&&thxPK zTMkV87*0FR_~B=|b)n2wk;aky3MLi)+P1mA_QhK_XF7%`7(?`Wh=UiDfR;TA)`s`w zd5(M?8FDoWlv@hvD6m2{p^kgDof)Hh<-YOiN$HxynBR+X2IAiXe~i*943U0AArD3+ zWv_UbHAS)(dcF?xH%FuVg=>_uv@`LI445;cfLW%%zP)jJ+svIkiD7%##_}Sk8<~je zQdGbMkA!ftPk6RI1^^51X-tt$%it4d7~a3O^t}cc9Wiw`JlArCSw$M*g5$g_SJV>D zl$$v*WxhS+@7$`Fe~51eAogU(%sx$*tjF>TOGqWH1kOYo0^5m5L4rdm;x`Cd1BknZ zea3oo*WCyzJa`X72{yzFps@6~yjL;UcD+g#G`bRTnZy4jQY6)wcX{%c>_?Jq$Udr} zM$D^Ct5VtSxuZ-x=1_inw)Jqb)IM2e(s{~u&GPytdtOdte`Vn#eg-FpE}pVCXB*~< zqDkWG5BdbhP;Tkl3H43PDK?3}_bj~Jr2qjQLiAKt35AfJlSN~yWU4|(O|A3Q|$PmgbWT>!Yo+M~~Y zZ(22MrmrMfSy9gMmY%@Mgilj1N2ekr)&#-u%5y=1m*CeR5vKs%v zO0MgaSRDbXar3M@LS`{>|D*B&S8ZkPDkWrzZg_n#50}9Z(Ih0elsWyAo6M>czq

{$zuxrmMxK{Yewh^_Yx}7U|g!Ie_9S!S7qYQ?{4B;$EogvEepeUu^&Aj z{?~OT)+t_0Mhk4PX*;oo*BPS|#mc({&;K!32r!5!zJZ^^R0w_12|`Sq6uFTeOErHF zKbj37hiT8;>ynFFZZ@F*WqGyPD6oBhjUzy5P6ENqRyjS2xdOMB1?Fn8bl@{lPLgQ4m)dE|G zvTqAHL>DK7@7=}I!m~7TC6+CRG~xO!f7hGa%~YLR{#W-EI0@VfgM*+>E-)l)s?)y| zC;$UeAS8Mfz^|;V@e8SOf5$|*(*py#YIjEK6$ti1D1&DlqACy|A||w~A;!#9#E_k>)aHQ+#XiEBe{3~Q z%co)hdWH-onJBs2W#|Jc5AI3oguiTye(VL*Od*bzTEGS0dVYK;1;*OM93mp|5NAql z81}=d#+G*3(ht#waSgiQ{A^wCw>29#6&cKhseqL_wJkM}`WihCOkw1N0YArZ2L}tb zjeR$IO&H*l4WbN>L*x1?)6c#ue-uaYRB!`~u$r!-*KIEHcJ7I@IK=?bI{nV+GCS#( zox=fvTt)0qT(AZf%iANREZKs>V|w`wFhuu=p$BaDf6cjSrG|{^ zLzLV*x5o%4wG+ZU;um2X0=AJ3op4Q+Ye9ikNdSa-9YPV4l@6^I$wDf40hr1!H-$OU%j`tNXG>OXMA0KFwf-1TA%ZEzrFC~xHjk`Cud<_wliPrLM=qOgV z6dn<=C+cPLYhae_B%M*AGiJ9E~-hZmO-Ei0v}Nnh)B8uf=XV~5CpPD z8mJeTIZbuW+OI6T1^`eRSIN7^k()1dugjX!wg9OPg)A)L0(kW)hf8qqkOG7@e4qli ziD1wXBmH9?4EmMzwT#WK`X<6e?}<*~tXybTZxoG#f478sD%M^Ah17kgPw2OD09dSV z7d|Ed2`PX}y}e1PFpyZNu47NH5G^908NZofDc%7xOv$h+Z58`>*$R|K$9mv4{{~$1#^DguTejVTU*bZ3-HyVkYIKp3Sk1Owb!aNEg9X4?hoekE;&*1+cw@ z{?)k+N#8TpM!^y@jY0Pu@3)vocLeVKqPnUIe*vrRXLM7uJ4`2uL)Tdvew3Q+<_pqisb34_@8pi@B2CoM+3;x9sEx>P#eP7!o>fI( zFwZa{6K13|Mnd4_^%_HTb%@^>u^}oU35#ww4&9G-eztf-{l2IKzUJ(cmYzjGc$biU z1QvgQgZibdZ$rw`2yc7J-?Bv~fD@(u4KIswqB%n%D}VPzTwg12aBJ6vk6$o!K}9=R zwc<^CEdQos!tBQNXdwb|J~FT6x0nia*;?=09`^7;SADb_d^n{Xo96P`UA59WGU`PY za^8L{lLbDgvh2zOo8$aOY=Po-g#AG1@(zDxB}BD_s=IOZ)DX5Mx~hVUMBfsrv3rQ^ z(Ok-pg9PqoIBVz#=21A^pMbmv-@3*O{csvH98q0v*Do-qv7|DY`&joOovU*Iu!r>; zy)C8UI=64Y!fi7qna<=DV>Yry%GRR*kXL2u-7;h+;`ETQo1_gPMK(Uv-AL&7ImUnQ zWT%9Y&$gMX1HrFUXq1Z=8@`Jxx_?)-{`r&zotHScT2BE4y&!|Uy8|hS{x=S{{uW;R z;!3KvR??6WBn5rGSk-bT+Sj-Lfz}%5ZG*7IlU!v`HvQjD7JiL*PM@VUIur-#d-yrt z-nreQ5pP+lUc5d)D!xV>>cI9g${2qa07Yt3!;(~bI4<+yVf^cstP;a5WQpyjBCarr zwY1F3PUDbqqxtcJ^)YmgO%9$`av~=6g&;%)l(#IMW+19JBl|u1a@hA*(o~%Aq2w{z zFXZeKl$S=r$VTM#f->PnoR-8%N{6vjw$}E*NvkSuwj7&vSQrURzoJlWP56IjENnsv zrv?za`^^1zLR|jy^XZi!i`r9-jnPZu&FmQa!KU{@U9Um*TVcr9LEOjKeT%xUI{(A4 z{1Q}qeL2Y~dQBH+LW0|QansOW%sxnsK%bYeXw;@&OAm?XdPLSlt6w0el~g36wuUDV z3IW?Q)frw7Ji7yyRunL%mV*OT~78S@`e;@ozTbZB}D*1iP9)Viq0Vpx9LyQc}BCYU4Cch*j?rREVI1X_L_ayCyDkLn>-s!w8^>F`%A^p>gWu)d(=h3`FA z6cG--714Y5326K{C7pkn?OE7%NrkpCn{Y*<>zgN$c<0z?)YgmUqBUL$n7cRObw?B}f~so*(Xp-VY-Kq0o@4>7LD+Vl)TM$Z z21Uya_u86?9*iE}5W!j>SCO9~>5wq4k|+~VP@;WDn-ul4CPjY?iYQtPRbH7YGv@~H zTjHe<@2YxWm~y??r%Ky_hr&SM`BC9VF21UR%nQuw-Reh_^0Qw2+zmRb^TpdBxxs>q z_Np||LUM*>b21b!YmQFcJ^lHfCP|`pJhxfelf^Q>APPujsoR$ps7$}!N)|b> z5*I!kUmBfbE@UzNDWpXQX}(X}SFNfnVF5KmE`L%A{2ew*!7UDx=2hO5W!KMo+RRGw z;8y&PZN-GiSK7<-FH!b6>hg#qwT zh;G}Y0m+hs zdMsU^hld>&ll~N@%M?MFU+INRK-fWXilW5C%uA7+J}?|^dX5{g+7E!qJywTLrGP67 zJj$lM;kkcyy@qrXm_U^cULSQk-p@agiGi3EH6BFuOtL1K!b5=l2)&B1$wtU7cskoX z>N)j?8z#-KvkfVsE;thmw?~5XrhKK zGZm|BJJql-`@nXIDq*wpD70aTbTfzs*Y3E#1v9pQR0azs%z5EPbd2309_z3sQ% z2=nQ=GzIVJ%E&X^YidqyhozcmN@yh^5Wu3PwH{Ew?a_$#ahcDK`5qtXn!@Lcm%6@W zC{=%?REb|U4|Q6GEJhwSTG$M}QX&LMAe~Z?``DC_RLBENVF3B5urTO_$_kHMM1whh z5HT#Kl)@zH<$6iZ^IBl~R}lf7mSy(-b{B78hz5SldLzS`ndxuMVQ3}L*X@cmp!)u6 zAGQ%9v1Pj~dZ-_EOAcja5nRM3$r7Jx`lo*l3lttTKezf=2kEfhK8K^b&P{a)H@&!$ zH)DB3=u~6z?@?=*f05(qMP}NptV95eM-Tz*e+6pu(R@fS^Bxk%ka*L52yBliwpJ~+ zj(Emnu@2KMCg@NeJLvekHnPCwQ4&QA&Eag^RuT5ftPT9>mEpHE>D`^#A-uV+wrBR49P2{6@nvc;{Ay`oof!VtP`fV_v z9hse>D!zR_04};2S}CF+G3uo(DV2(^HkYd26NH-dAEA90Ch$W7E{(8uN4{zLu$rwN&TERLhJt=368^ zVIcRP?NRjC^N(*0%O1?3fZY(vio-&`!-0MIyPt_58QgQW1leOx*LB>R^uvESg*6-N zd6*Q~=VBw=#km&>7a|Kr|DDPkg$j$&7jA~Z2H45>>zKWU3xZGPh%}Xt9_-y%2!jRC zU*h@6CPf{DF(EcSB_pMqo_xTP6~GeFG07T!IkG~mXAOU3HJQ)ATiX#UrIy9iw11;&sXcD^k6iE)spn&ceF5X%?^f@WiM<<(5PY63LuGl|& 
z8~*!aO*UP|Fz#Y25OgALNM|VGAltJw;1WpKB=vqhdk--#(I2IYG}e(WgVMTxjyF0r zu)X0nmGen*l!-0<%@KfOTRrWR3dCo2OLuVglv%AqchYS%Yngi~apY)s%yk=QxE9EOEh0`Y?XI=yILk2N(%o#|ldf zSq2UIPNAqjL?RD8>#HzASrUhGTPJ_m^FEFv7wXkC3b43G zxIvmdZPl`QKk(G9E~OQ@>$*TGg9`(u{O)*@$SwRK$dh;|mDDI|vNrsfUqJ|eMiFZH={6&e#BaYn z;|TFa+Jd{0l-B&9O!$a={ONB|&$NumM0^D5d%hM-m4~e%=QuJ-O%Pbxj>=8sw>A}l z*#o58Y7R{3vH4+smb8gji%Xq|R%1Bjly@FGA#F$fiXc=T=8y!?;#B%)R}tcOdk1Se z2j9RszgBIS)#bE*($sJ-R>~5uzNg9h+SZ=RxTa9DWKP=#rKY{45k(5~O6E3m(C@RmtkUztzFjn;pQ)IMP~qF$x!WpWp_* z2vHHg8O2I-pkmrR)qhjXCxd`g%C`T9#Nrt;g!BEjN?6fO7^Tjr{WTCEfC@IGCw)kbNwxu zgk)MKQVS=C13DOIiIyQCo3P1Y&dsX?fK0)4wNd!narB(nEJNOc1vQZw&H;hV4Vza2@Uv7*8A+@PIf&t_;Qgdy4M z-j2htEN6(^c@YgCNevCteO#@sAK6QB7&!d*fE1~JHef4fk>Y(4d}+EoeQ%19Qz31$ zc*ezBBc$nqP4V)(4HqAHa3c0zvvCVFG6zi&{ztycy4FtLB5GXP3LHx?0@k16M<+l| zSBYUecZsYxPVMSTZjWqzwOC{ql&dhVfwm1gAc)<2WuG-m?=CNj;Wd6?UPOnbR8Z+68b{5>UZD zTNsWXOP?iH+Y$pvN!V?#FnlUvEICFdP^XqjeWzl*Cibe0ZrNCH_VVwU=AdvZL*x$| zRr&Y?RFCl~W|I^2Ead^fp7&fN-Ys5tMkJ_WUUWBN|@XuW!W`5Tu(+=h7#x{{3&&>)_bl) zxHPJD2)!AiRvi{7FpmR%ZYWv{lMO}|D#sD+R!n=-9Y}9z^p>U*@Tv<`!bN(2JbJw9X%AI>=S*&4eD}HUn;A*11`L#|0*B@F z>=8Ga?gbDRGL+jE`W2ijbaNRE>r^H6U@}@v7g4~zxf9VQ&!B$lB9NcJFT|JfZ3}5I z<5kU8!Z8X!MIZrv9SUuKOQa5ay0)qGqEEcSt~+k!Jam4GJN@L){aSQzlf9S6a4Ob2 z4ISu0$g?hyb0~=9=MvQtwBJE;IF~#F@l`k)L%RU%1lUq?7y!3lLZmwxO z+bgb)r>Sn+dwTUxqE{_j)(p7MIOYb&xohc+N0iqX=o>ls{vGdh7hq-iWLZ|$+dR@t zWI?GJ41|!D5&h&9ujRdgU&HU_x+*_g;l`Iw*`n2^uS+*7ZHzx+!IqZff0&r=<)bhx zEb{$;yJ3+{_^DH&a|_R@uFEab!DOD+{>H$LI1FZy1ehj&#NMQ^R1rrZ1F!>=4yK-_ zSJ2qlq}uoh4<^^mNj9EvgHz$#gaOdy(j|%ADyr>|@v2^8=DPRuRI=OMon)w-q}4wf zcV}_*eidh!?sBJm?23s20w`@)>YRRsI)!w}NYc+>Nc4m-?gMl5)C;ag@tE?eTWfti zbqA3C%7E*Cnk{bJ4tFNyy5=|~Gf9!MxBcplx)G1&#wm8u96JCoynJl^zRjngam*$j z0guEW@Jos3MM<=!Q-v$`fxyFBr3(wFrTkKq=SL5HTNs?&vfW`|Eb@nzHF=?U=_lfv z$$@;7XZ^(#fATYeOAH<}V85w@b&%xwqW~NSqkhVNN(;^!EVc!9X@8ZLd*}~XR$F9& zx2&ok9)_|F6^T%Z^b%P3&tr|$zT|7egv-r-Aw;Y}0$Z+w1ih^|zpGt;-v!iOug{bF zC<}SHA07@VbZF2K-Lbmvh7cA0y<3W4y%o1beT!{d+Am%an}Zs7Z=ph$h(7tNsWlXj z6Re|u*rR(uTG1#GQ`^P*(Air*@DqxvW+&Bv*S@P(EN{7uLW!U2^ZsW}rBkF7Na*3V z4ngpp!=)n685w!3+i{C${A)c`xYh|Cf`ol~@ODMgKr^qpim=+?5)&+m-zSrnSWW2! 
zU)f)QF^Yvmy#^AkLSL?Wpf?2+f)0K|J@?grJ#qRwM-0I}D@5vK!sGg|Pnk<*{+n3B zGsRw}kxEpEe8j+Uq&dVL!z%RwO8tX-u&C&-*~twL{)a;A4MQwCoig*+{{l=M&xAkjh9`PMb2)}xYAZ4jY!4xS z9^*p8>1|a{%t=m|0i#2t$o&>_y4-z#Ntb7#_^?Apt0pTng{?Bz@?_V?_~erxCF~#O zmGX*knZhrEh+e6~q($>Ca5r*nu&*z&3tI&`Hsv&z&MfhXZ;lqs8w2j0g4DT(JG@!( z8IM(bj*HkTYf<1{y3SVZBR`LP@&(Kd5K4=JRDbc(3RVmJp|Adso9!zu}VM9A@{}RD5J7}gdrJU zuxhowcX&|rq^RR46?afMZxlvcKbajZW%JA?key|Xh;j2;U;OX|{_K=PQi+ADGjbq@ zF#Wj-IWgM|^B&~Ma(G;!EDB$|kLtVehS8~Sy!vQifF75`4IN;~8`NQex=6|4_e0Wk z=_fAb#hFnv$J%eJFKiA$1@J_FeA@)|Kqt($T=C1+4<#oAh2UgL=e#4*w4n6y|Dr(VZ!8h~c#a>7Lug538k=I6T#R7~8g z60Aw|ZUN$}S3pPi@(i9p_X_%3JNRMUv`+-Sg zp<_9JunSbOl|3VPBlAaep>R7uBS&7e5=DKCRW`!%M72BZGgR z0BVL}m9gGFgQd(7UtVs1JP}inj&i6X(a*JNyCJYz)4p6(%$#NwZ{%Aa3p4x=Y1 z4t)Y{z)L@I4wN(`m6J&QR0)oJasy6wSw3~$RZIIrO&otyX?7?dw;2LwF`WiqwqmmU zvN9gipnxsgJEA45Xc@vI@s$70rjWRR&%%|YRQpDG2sg@q15V~a!ZpFh zHEYLsaP~}Nf9B&urTUZ63|<{SSl~pCq&xYkXR7(deYo>l15NK5Sb+Z*o!!49*<%Il zGEr>cwykuFZl?ULZ0wBEHDFio8L-w)7}T;DXx3*RVM z%r`3M z#qSIWWDYA`DIONjv z66{383+hnMDL6d%l^b8`L0WCdE1`0cSaMmHBd)4{J$8N?jaWhG8VxBN4&M0kWf&N4_DL;Ph&roB0Z_p!|^#V;)4t9SG5vj-XM)hZA- z`5izHW`T2CW#Z>&3Dgv7%ef8B<}Wx_yT>)W27}P|b9Z`cq}4IMs$kep{i$)LyPt4g zQadAmN=ru0v#8}k14O&iRHk1y>&5z(%bwAq;tEL9C z%whR=S(UKKv-p`pg<05@w>ti2(FO*t1bP*afs6P_0E&J95&m-`v#l5aa5POdkc?rC zwU{!3rf_}jIK23N^WdE%Xc$W_uds)-TL=GtqB)|(Uui{fKWvt04A=CLZ@CgA+e*jp zd?<7~I5pT}hj9<_(Q}cCMzRG1d%isO&UNBVBG&4+%ys;qryA2nsch?d=pL}*s#K0p z6p~E_12Q`6>}@P@2(^2(@J&;%thEU})mwb7tf9cl}W&yPruw3C5vX#UCNCHTW)FKxzH>2)S zC%C|k7>;JGCtG&@Q34;M6;;uwPuDDeej8<7*b4cTh9}VrhswV!EHwn;y^j1TjXUb7 zWMXL(x%2<0-V+f}-Gt}Z!AIpIYF$?a_^}n!3(Y=7VvNXv-i?TnzH9hSP`0#^HkmY* zTNc*lK8;DoK$KEHIH82-(q#l7#y{3qh>Z)hYyZJ}#<{&2FQtPka={0|wz|fD;NXzMUW2NWU=@)SyMkb69ASd z0r-8?{#Da641V!TSR}e2ONNi+Jz5UF8vHwRz!{5gbFe1;4PJdMcu_JnkJ zT?}sm`Uw6~^!?QLGRfA~(kNUW@thN>M}r$EBA>OnoX$?h+-5@)oDHd**4K`YWUx}q zNZ4rD@D6dxGOmwctsBdkBf!*1A$9&c5b^G!5@RbzHL%IGSML72`j)GIra;=%?9C|; z4S}iBr`B<$1gE0-nLj3PIqG^eydWqQPq*_5|3$V*W$!l1+*!-cK7_>|c1PVr!+ID6 zE|PUnQ4DF3@{!(-C1F5atU&OkIJf;Z^*~dj%xnhs{z|Y*mfwmNK@B}$JO#s^2;U}oM5S7}L7zrYU-f4_;JBn<_y)3I zcT8$XB5Fzcv0vD(LgZlN44$S?BPdWj2Vp+Z(z#dX8Y&y}ifuL0S7@x{y? 
z(10~bLZHnf#(CB=2`wd{v@c6UDhgm|fq#6EDUTkpuz~)9bw!>u*R!kx@El+y^JK=O zT~gn>gdrZ578}ifHe|!<$IP#kF90C!x5b*JHE8pQq14^zVPh>q34IG@+tqDNq40bY zHKSC6z2tS6MZ!k7?Ka;R)HuPu*Xtj`&ng^t1FO|YQrZ83)*{ZDJ3@}`=u*qDHh@ZJ z`93|IE0=N+`c2>p``^1lZb-OtJUdy+P}L7BAGH8uS4Ve$xQz4kjf9b1Gy_%d;A^0= z#1K8MM*z5)g5W2(UDinbQP}T91(|H<6y!oaT7qaOs?_3KN!&@;G|&4jgvx}-!Z7S7wr^A8*?%{#77{M$*4TcEu<`f4oyoyvYiKI z3}om8jtv`s1$M@vHa%XC7x*-cl1v^HU6zg+^^vsbxPbX(KptVho?E`FHv?iz;;QAmm_vGH z0nwa$ekx$FcZl8*trdGJXSwbgSbfc`p=vC@4Oi4ip42^6@I(bpmt_%%o;-=8Cj%rYm5vIh6iLTuqJakyP;K8@6@hkJ z7q%7FtyMVG=k1W1j_F=N2^YgPH8=wV`Cq1Yb6O0|6@g?!dXG~Z<3SuisQNr*N^Y%a6@YV3*c0)1Uf(#Waq9tZx^4)2R|9yW zi^b2Dl*HyV7WWlR#Y#*5Mx1Bi39vh3`C>diQNaXo%a3Jo!p57NHjKT3>$^nNk&OF#5&z~#42eS5m-3LgASCmtGzs+ia;%M0LhM){q>%D|g z+7||VPe2!A2AAR7`|A6wV0YcZ|^Kgfl1 zDxGuA?wqrabNc$l-Nox3AxfnRQIJ4#W*A~T;ld;RJ|vmnbD|_e+@5hVfO~%6UPj@6 zLTM4p;P`N4FUGWJANH4Q*S|!={W@(ig0avx^t7@eh$(zifsi^+64LuSHAHd)xUtsh zKo437<*8PyXR|&$#Bh_0i0;s9efM;b?iZa4R-GGFBbyXQPXzxv;SI&t3#0vm560hF zS|!elUrEf$P#yeKmOva0@j0`=wa1=+#&%YhR9zNauuo|6O;oaoAxXiEggI=?yt0Tu zVGKOOYMJ44VT`?*+-6`ReR-a~HcA)|2PRKChi@}{+&Be?cUZ5;{vFQRNfX)?O~|sJ zD`rDl5v|SF&149~hME07B>~4FAy+^yf}shMphe7uk|3N|We0xZ3dix~plAJmyZ2Yz zmbL)3fcAAR*%f7FnoZz4EPrYIAxsN1Ww}gig*4XqYM=Y+p|Is6^cEm=&*0eoxp#OK zv*6=s8EU}{7uAWlVlv7qu>{1?rK%lzhH4h&UJ__uRYAd?N&$GAt1!f>BV2ZI#z`X&#JVb_Bf0 z!HuEeMLVXhITi?bNA)J4yu7AyV+wvESaohHMg`-Fin3TdYbqevJcz=7Cz@BVG%9N) zCKDl02^*FaDYT}?`{sjR$r>QK@VD?7NR_iv`^jLuV@PWpKaL-I;DxG-ht1rHLl|dm zRo96EAv-c@d*E9a1$-R)bADQT8njO&J>36+*|Nn5sexyz$~~}<+4wnUd^V=dFmR>g zs&_|4ZNRZC8{m{(aAY5U4HVFDMSbyJ{9(4KF41ZezC?9Pb$$u@s4A2{R%#Oz{@0Wt z5rs-^XApI>GnP(bb^3@K0hL0}DH8#e;KhE})(JvON$n8gYUp2OVM#rx+Xa_cW7hSN zEV@p>^@k~3A=Df?d-t7|j%}4@z44PRX4jbk928g5QR$m>q< z)F%ScrWuI7aQ+&9f^xdVRdQ!d8rovB=_5}%4R#_wr2=oOrYv-M4rTXgRQD(-?Q|CN zP7WUm9?-MStJ@`W8hm$9KU3cCVo*P`D|uAwCkV4M_d~bDE5A1$FV+&I&%n# zo|xM=*VK7lLf3#?XVc&flItB+``*-@UmMO(bmSRY7m}!d$h8X)v@zj#t-#fb`xlkx z+G4l|DmxDENz**g`9pXz4aF>-SiBTvz$fI*&QZ8qt7SZqBSB~ZXFfPFS2!lADGpGj z0cMe;p5RgsoADz}b5tW5V;vJW>hXnkas+ox!fUAMAEc$hVBUQDGwIQ>Nu|oE#M84M zx*N_UF}-bnUvMdv(OzY+|1e#a@BMbmeMk;sD#ZnMsXxc-71JV_hQShk6cbC}ymCrr zlhjq*>Is!#ZmM;FcwDn{&-l}M_bQgCd{#4$Jcj8~N7wGOp_gu`! z_w=J4qsnyz6xaMgLUyZQMm>g?Uf$82MF|6shEC72@a44whLMsRN}P%iX9z8u*6XLr zMZ;lM+F8u4a1ry>Cle=slnHk9dVgw?j(nOj)Xe}4Gg>9ET;x3eA`KP5u!f)>!ZDnT zhi1NirK@7dz@KksglH-;k{OWexZAu3Pa!`VmTjI>G*~Pq?qr+(Q}vnlkQFGUf2MS` zX59uQr|9RjE2%G03J|agk7>UtcFkk!ej&4B=IkQqMxv+HbC(`8om^^ZuxEK>VOI~L zv-bu*`Wnt4^#SU(+csdD9MS8^u_qcU{4M)`=1Y`Y+*ReyV=M5dqJC1a5 zP#4qyIa}?!IaEpNX-o@igsjzpm-Xn%pL(fV+2+%8iuhB}31d);et=ZNe@q3$a!$-! z3kPfs2S0hqQG@h7tUn~R2XT7_-?;_y%bo#;V^I=tO%+T+`wRdD*SQC?4Z6Y#sMLLb z@`6|Yo8PVh?(ic|aMfkG0$Hgg>6|5czC-dFA(4sU#TwC3Ul9x(QW0F2ekwH5izi`; zCqwBHC?$>Gc3drF!{OAr@+=a|0Cm0$5Z;({kFeZ+TOkG{BlZr`l6U{y7okwX;a`_= zZE?0+VM0|>i@F_G)+_E^F}Q|pNdifKiDza8zNgZZF2E)Ss4L#7QJ3&a%cTt-cb%%A z$TlhN6bIXCdjP$fBzGUAF4FFYekm>6=8*A}%;-Rk;e!F6X?{s@JnoY|%JS zjdedc&KU)iFgOA5Y>n_9n+BfQ!qOjso6o)?2~ri?lr=|%Hm5c7tBfswo9Yu!3JC*7 zP$+VyLjiXyi7v^Dg-uAha#gv4Aqt=Xd|$C$?DJ@h-Q+TCp>u8o?aB&Vs2lr*S9vX6 zPJ3Ocbs>;#eMSkB7C#x)6S)8uZX}*Q=oo^Z&z1UgkzST*B@elp6Y zr1xLW&stJKWche!r^q^edn~0qJflDTxq*1cilGmb#??lcdfIcWfh3TzmeRQ-Z*(uL zYyOY;D+N6usPdnG=ipdJyuhq1wi*w?OE^Y6it!lzRB+j!p)w`$(*wK~A@yb*AEau9 z60C@lHh2sC1_eRsesX4~$zCoIAxCwe1T0u25j1a`wi~`-6m}Ta0F<66NmBZM?GF;Iz`>Q}XV#ZY!Kb9` ziiEf=eLPC}#={WH+JXFW-=7NHa``IQ+i0IvN#fb*)ffd5)nHkaNVDZ^aN6W|eAiw+ zJ@y@MjUo%ckhil@W+YOr4dwg{t(xpzy|WcAO-*@N?!*%5n?DYhMHZSjM`Sg=6%EX% zhL+T2b@_UKkSEl4GQYjAY^`= ziUa#3KajxVlag6Jblt3hew4ZVeB5SBxx8qpmRk`Bcpkx>spq4?HY!stZS@us!? 
zIWw$(;_N&i;2YHoTo;K?ZM!Z;$=TtKP2DB@Gf6c59`rSL-*J~M%mV9IeAfK>bA{E- z8Id7$Nn#_6_aR31b9ol)?W?q0eX&hfhS)%CpIsUTCb~`P6-ZR5rR+9G?WSdGf|D8y z`UV&HE|N$0x~abG9%&i9^>`gf!;EsY5^)TF4#(BbGteI^UQ|(>1ax3qkO)$uqQqru z79%Ue)AmCf`hZgbI-?6s_etKCg4GjC=N8%AyTB&NrDw7@1oxdYTVdPQbp&Ifw1y!i zi0i=FyL4qCG7@#@#@aps8yV-v{HdIVcKKfUg&A}n;c+{%ij0>bHIK|O_ zCwnl(-~9QvkubOaYb8+?I@+i4-?}-kQ|$f;AX3y|u;-$9#f!$0))sT2F;-E>fw5Nn zi-xeSy((@IANSD-1BFZ6ZcX{Vgzxg5JFCcT&$pn4?IAdey*113YOFEEOn)BCym)W> zEF+76`uttlSo(6PaQ9!PEL+bjfNG|HQ;2zFNw&Z?bgKAK)^UMysKqtXU{qwp|Le8# z`wcWl7o1(4d{J&9+V zMZk#QORx{aESkJFh)o6m4Vt1)@8v~N1n1H7p`cWz0C5(udQk8JsWkDIE|AAjwRkss~>DMgjE?`Z6T zlb!@#v#G4}j4GU)tPFgD3*{LwgcHtbr+9m0W5$O#5V<;0t=js2tx@rRaSFbreb}1U z7zy^CgDo|v{;fH|yvk^Rk&v)nyW6COD-lJq=m*2o>wwX%;$HMJ2~{xi{K+KL7{o12 zbs9{OOd-~9n{fwMlg4Iidr>JncJgp3$Wzg&4v1 zZuG-)P8y3Lh)otB))u#UI+M(0?JyQI3iP?^jTLO&sh=E>&o2~zDIF|*|@aWl<_*NuOA1-_g`%4RKK35KGVw?;a_Bb zBlY1Rd@L`f;g;fmjA6?Qs|HZ0$o~|^5f_+D`r*nv2#ZZ4#zAr}Nlq!nj}d;2=Xze3 z083Kf4Kq&&s)6y3rmi*~{D(@a0S1eQ!^lk~$)x#uM^FW^KExbA1bXL3GNtv=y@n#!_eh(T!eY)Yi?tfI$8 zAnSm+Sqn>|Tha3uEf@GOq_=bS zjy(x6=dZ6EA4c;g#tud>&LD>DpCMUBB~Kg$6F7@3*%RKwH4HY4)N@4Whqg_C@K`#uzVCl?H|v|w;uuQdCtSa zW_>`7JA`n9=CDD+2PH!jtpiH(`go;Ue~Ib-DyJU{DKsncrrU3zwd zG7P!c57WPOUT}d}sED>0Wa^p3#Xc~h z_x?HZK7q?1bTpfd$7y%b&!78u`0dj;-Iq`|&zemk#tcwBDg=m~PP!pJs@C}C72}|P zDrELuetHrCjK^12v+2bWY5$wv5H+~4j8bM0ZkGWrpfVCtevGNYO(4b(@~GRG%C$(n z)B2`Y=E|k4raBqXfHp?y6Se#`@IaBZLcDVKB$R1y^Pd~UU^G>~ zX1pV!CQlI6yLOCP(c}@mcnwYE)8uAaJu{WLR8jsId!&c%#~0aZ#3jOT$Vp^(O5TkPcmWh4U{FfX8QNU`u;5%NB0mXK+qEQw}+myz;K#i>6j8H4hWJjCo{HoSYn_)fsS(9G5YKp9na z9}$4igghgH1*z{s%$=rlAUL;8SulXGWn+ecgWh41kohj`l9Dh@LbjcnhFk?xIR!go8k^W>f_ zHrK0KJUW`{{Msk+gRxa_*fJcz06wtAhjqU0VWephWe7u}SI9RDhys~fk@=K$;p!V2 zMIWhw<`MIYw`*bwtnYV!D**zJ`Xt1|aD1!6b6E5%@aw613hYO_nJfvsAi&w0^YzPq!+{~Svr=;kDwJVGdS_8XJ8a#= zLhSeKk9Kt~O%?d;uB#y6TG_TSk%wH^B3+-~Zhw~_Fjv?F{7i{6I^<5vo3n(iK}j&a z0yR-f8JiYDwkK_WFScFTPPnP`nO;|p9ZpQrUCb4tcZc|Cks1LcB=e1f9G^F={jco; zzy+MQW`uJ9jC0E}~EU5p;Xp_S@PD&HE2Jw&4C<>u2n{ z13PCH9=oIUU+-bMLdlL}nU+GjTUPct3ls|Y6M^*7NU2Xm9EDZ)A555ev4rSGALXXt=lr#0*K&MsN=vm9|+Wf zSR%{c80a|7>N$?9D>YOa)#K6t7QkcIWe(#eMNU-53#_e$Zk9yna@C{KVKiuw&>7G+ zPV&q`^1GdXH`6uGvv`%k2m|ZS)adexbop6lAFjYN<_k&3!B5W6F1Su9*DvzXb}BxMtOr8|ldf!Efrg?EOi4;np**ZB!y=L~orXU9VZm zI3COl`Poax2?-J{Q28xP5 z);tiI5$j4jye&tsAMN~(O|pNctAGXIf2lw-{zu{9EvxVG)GubgR4_m9T2gk|b`S-J zTx^o&p&W53RU8lrJuO8vt^(+fGlz*z_5hE6BIu2z*Bz3uN$lpufie62yrFhAI&|}; zqd+$5mzKBQ?P`GfccxcEHNaug3k#^q2i)Ta$SpRZ01fIsEK3)EUc;*zdx1;_g;-0P znm^oD^(X#dDQ(sQWxn)Ohci1Qa46+nb)UcQgGL4P;;8K#K<917pNsbWE;q!qb3tK$ zc7eQox}Wi=Mt?REMZZXP58U`_ld|d3?;y!OL**}yuhXjgekQ+n{Go%6f;0hBPKVEW z6P9m~rM|IdpWnZ9-BF_i+e&+(g{Y}>UG03vMCa+Q2^MpE=N?{rJa!t@dd?vF)kZbA zAxgE2WTdY()?O4@6rhzq0x|1Goim(&QrFdug0x2p(a#+)>oflpeEm|tL%%5>6U9Xh zG%(S`wguExVIj3c6TydM;xvxL>f=m9QpDGgHdWNA^=Ap^=#+uAQFM9D@%`Nd4DuT& z`Qy&nHH@JeZwul_Se@NJ`rW|EQ}GTtmb47jtlXIQG2tcJUjvDhu`+|0FGeYUgQBYp zVM;TP@}xdy4Q%L8s~mBP3(vW=cK2oCer2PNv-L7JUO-GIL)36}bR@}^>Xy|pj^n4V zq~1dU7YGa>Y#5s{rpQZPh08qGI!<05%^t*blZQDv0L63J!;Y9sa=PQ3i0}5gs zi{UsiZe5!1U}ZVyt(|jOR?5{Nthh`4wJAFzk;9gOH0Gh~YHDeA_r~${`P3lKtrf6p zv~?~Vp!cOHt|>M0?%&t;DujZ*`B$D5y{h@+>)*I34>(i2BX|7>MN= zLu~XBN?skT`d;$RsOLL8{L=O8(*l|>_eH)H5oE$B20{e|zP;6wch8kw1@T>PNP>^; z5Bmr&6Ie3LomzRCR7QopfLxY`bA|G?Rb!|$@2uO3SjtC{_(;ct)5j@vYeYSIW?}Kv ze@rso++BBBGUq3{kz6x>)G=3dSU*;j0*eRP!GOV>%$komo?y<233@E&fxKAWPbg1Pk}`YtHXyDEi}6g$tE`L_e= z)W_VCVL zZVHJC&(VRTy$rI-CHj!9Lr0gzjwbvhIqfgYglTZky)7o3N~lXQ))T>I8MGXVvt;6M zYLg|hh*~dSd?j3>GD-Kco$w^`b_+rSmD_F4!k=eaoFIZC7oCzmcXTp<<~ zeNgkd1+$PNX34xWTDXH_@bO_c!*?aZGB8r0ZX0 
zX4|?BBK;L;5Y7;Zqk3{n)|W-Mr#IuwMNBW|6S}?b{H?!%yZ4ejZ$NsQ5xwG*iFvH{ zR`n|AjkwW&iDEnSBClMT^NYGJ$jr8Fde0>CluZpaV}c1b;AFw-p$*2iNL!VJ_IZ{yNffu{y?sIV6BR91>lg+F3n zk*K!9Tq*6tN1D|Iema|hlL^Z`K$C(GgocmX9hQWDgAcwi12Tq*5R5+~-lWfLgSA5} z-Wm0kF@@f8uJFAA`yb$f(cLDw0%jn^UDKhwA7I3KR*rt&6m~qaWrth4zd?{TxV~NW zV7+7qUR%FpmLI;%VSw^f8dlb=oRH$%kYa6u&$kEwPb)O|M3B&CL)?<{w-8y0Nh_vE`nlKrSc z)tOGMqvMB$a#V{|nW5R->CkfKEZ5)@yXb;VjxdUPl9Lk3U|(V&p25^*`qAKQF;={P zskZiPds8_Bi9dLXUkafvgdm@ix#2E2P%+?1n014%WqWIBJaXUyW<`FW&>IgC2o%iO zfkVXmnYs96>v(LdnGW-V@m0EA${%vwweW!oq`KL-jSc1y09~eIDCKy+B-as~bsZk9j*My-^-?Pj+3xmn zA_g08!`h-V71aa7jyQaSg~kcMxvu%=Zdn^en|mPVu2XjpjK_~ABFi*YQc;nA{##v} z$O*!&^L6jL5?8G>d?*Rpf-lQj5<|kBIHTD`2lTr-acclwxrl-QNA1UR`S!bPqg}W= z6;UG#Vh+1?dK&r@Ac2KrWA^gF+G_f2~jCONXStvTAM8FMZ}gTDBJmx zj+Gt))2hkz*;IVfygiAak)Mr!=#Y-}v$KFDU(l;zr$&eSGnTj%=%@t&@zd9}VNJLV znCzZA9SOHBBshFGDuZ{3yU;nR7GnGLvvS12cO5Kr-z0uk2Wk4M*p0jqkgs$B&n?^G zdxWE(y3viBYXB-_3T5S{&S~2U>D6zxfHs=GV_oVoOv6a@WW8K?m9D&h!v<^GugT?# zuPy&llk|%d#7jCzhkEN>f|T<^#E-|Mw)AGnlpF=sZ;nd^k7jWf$A_sZ&U5s=SME&d za0q+5%k^wG<`Yx~TR;&c!`>#EIqasVlVpeAx+rx|XJ;$fgUSo_0jkPCPw49W>ijdi zP4ks9-HE&53XadcZ+7*6f|r&p-e!ZFw;Am?uX0ZEpj-C&tPR8U4rFAcjJrpad*$w; zZDEi@ZsWYK0eT`&nL~v8^M#NQ!sPK+GDlNs$7uoVP4OTtaY_nFL+DF?NeJ__I+7Ne z{?%AEANjh}&N`Kw;2Rzt7sZ{=dP-=Zzh&Rto-Mq8@)nN7IHQw)=!-3;T0euIoQHh@ zzQqqBW_BV4p6zX4mW58#ZJI=SiU12`me6${^=-jx-QDftbTRHm19NcAqRkah} zd1MEhYD^};{L#8x)w@621`1X}!MZaXgTacIZ6ATIS6wdt!60*qH)DIB}N#%9>O*g z^rUl$RGwT%DqQ`K1-ET=C}a#r#Tlh6X15rHe$fw`Y~AcDWa`k9{cyMzwQ)9s1radn z2E-K0`lZ>P4wc?kJ(}Re$J{ZYLxDg@%?mG5Zp^vw480^`Q|Y;Q`K2&;|4?6mHT4uO zUuroDMkQ8%dA4@{Ds>zd+3{F#4UE>`Jew%&UW6ht#JYS$>zTQ!Cgkxu@!KgBjYUO} znlV2Y_qv#2NNKtQyvk)`#U=B~X7IJ&IGoxo9RzHOwytW>f%eIVEgem=RbeO;UD2U6 z7;iO(x?J5TtDF8qo5KV|8=5&4L=gcHd#_g#vW9nmo2w9rn}8WC?xD;UM=)gpS5nXi zMFx7sxtoxENZ08^wYEE0(t}V2)4`xBiLy0i7I`wa6%2=yB5hk%Uoa<|I*t$NPNolh=(h5j%eXI>ezXudEWZH< zrGg}Xo`p@ZGm&#`Bw8*Ga#oE%m|Q^+)J3uqkZj*g)bHDbyi+FVr4TG7Xa%!NaZo^J zb@vgnn3zS)*He33$(shR+jWN-2u0l07a)JB z0W5}4tv;}=2LiMYy_dB-pKL<4=ODK~HYeZq2d**NvI#YUni<52prK~r(#F8JfO!aRnh>*%t5J4VWu%k)+DIf--|!!ufO)^*E-1 z`B2OIQWolsG$h;X@vG@QCPy#+Tv4jbgZ*QKe+sq4H2wny@y+dZl_gTF`Cd$>){v$s zi(gw-cl_o>`t`p7$s-w*LsnkdwWTSlZ3zxdt!ohZ-^z5O#Gzo#35aNq-xJE6%FeMu z=lDsbL~eW#ZV(R|qZ{g#o-!!uVC&4Ux4N+{nR%`m&iWZ{;l0V0>w?mXE3sKtT}Q~R ze_LQ(9JQUM4>$`dtSvK0QG(Hp4IC@HCH+J`v8rNkrD^j#yk_ZdC9x4@2ff;IyX)Zk z+0A#1+GR0>JJ_tqWY-QZ_W_kgTY@^QW#jRu$%dF)wg>9qlO3 z;`|+L;Z-B$+P=B0DWd@69__l8@8Fql`Z5mHVABvz;UFRvkgmR=)cfVgAA7tjJl5E( zmclE&Ye;r0LDZrpsP*l5krmw1y_jZ+Wl_}IQu<*CZau1_AF1}d9n)#6&}+%;LJm~F z6g|LMFg&JmqW9;(Z=~A~)o^8-f5|EZtgCeW9e6zaG6p1$ajJp9XY4gY*Kjp;2g)GL zl-a-t8_Lad0VNFEzL|CIr?p4TWSeiWR=uZQhs#l<`L^^U%K1a7XD!<0Q9A$ zh(*|k>`I^s25Vrm66csOE{+tM*!Knyk1z~RM?u4tD@hhn2rAnvDB`<2eu|P1uu*;~v~PjKHf@-jz~$Q5XZFcN1G_n0 z@`1x@M~zdR!c>RL62@LC$DwQO8vH8dgpV{ov79IJ#NrrtHS@?ODXZK4Qh&ZUDq0I? 
zg1pWIimgwM9yR%tzuaJgAoW-6QAgjM= zFM;c@bL}sP82s;UcwRG{`-ymIHtOQFKMgQG=X-nSG}Q!_`DifP0%`g@H{TuY{Q9zF z2BW@lwaT4{`E9e1h6KO6u?2=YKNz$t29SDOwwD5Ud-Am(3XFMRcUY4t1b%SbkaY)1 za*C1@2L*Iyf1JAnJ#pZLpCG$&Sy+ez?{ck0)S;+z+fu57ymh}rE`#d!_*6NCq4pA0 z#e{|Rj!{pA3-guGR3TLNd(-+26y(#7BZMp;Sg~&7sX-vguftK%iMr3AU4!; z5eX(f@Y)_hFOkaTCkagA)9oQCL-Koi9kITY9|v5D+_ZBi2q~`=6o2Jkd^5to(fb$M zW*HRmO;NPN*bLAx|Ig?n2MlLe7-0YR3j90%y8sw6BfjZ;xx;oMilh2xd!ByVyNqf(!M(I?D^auXRInXrwf{i_a>xe0syek>G#-!(Jq;^zJ zV@84vQ3IoS^;c};=k`%Hu@)Z|_A~LGPd$X{A-`v6qSD*%0TghknLP1nIV&NI1*JR; zv9zk9Zd;$ViwzTeDx0t5QlP3@QN=?6>Zo~K2Bgv0XJ4c&Mp=+FH`I_ z7n{dry1CvFh2*w7rn@{{j@MAQCXg$!^VA1hXv*tn;c1O<0%5BdM8MBWVgeD_Mer!@ zb_I;PR-|Jh^l}xe5g_Zjf_le%WV?pIf789*la}o3IMll|7Vx%{HrlhBVHmm35`-NR zi3Pv{aFk^6>Enb!s=&=e=vIrF8BHfx7-A}O=@UsOKEGp|G-8`<(ldwR8OX3(DWz_S zS6;(oSoW7lCZ-$ugL8l(FZwhYd~<93>-;rRLC z!`NKl>NoSZ-|)d!F$;pj+HCr48A>4dS>tnZOlYs?>4A`co;)UrJb6svJ1oN>TXKkE zS3>P94Xs4bfXhV20h*F%2hp6irY!`=Mco*7JJ>c9z1YZYl>D!JYMTjuoIfo26HLbD zgz92(Nb~4f5FYA;KrTi&Lj6-kf2j47q=Gm*!33pKg;HCDWMVaj@m6NnR^?q+B_dYA zomQi?Rz(ITCQ&D56(=TzCqpkM;o?sv_)carS+d(wpa$V74Y(}SqA){3=p4Rl$mda! z|5WJVuM$PvhL37p+;vyiEzLHH*gOpA?4m&u!x+0z7_`*T6^NeJw3(z7f9RwH&ok~g z)C++Sf$-OQseisC1#$1hSm5Hr?FVS!6h`A$d*eqn^bDNwxW4hc+WrLT&eQt-4{LAM ztgh2^iGGz6ebpUygs3>3bCop!g%(;2D&8q-fvBjc1%CPyh3?<*f8OVPyZfAr9oqqi zO=YfJxpEbojO|2q4Rv|te`$8xY*+Z!L*0BAe z?#4_y6}rmtmelRn>-}i04wFlJ-0px18KO%H9x+at%_S1+$WE!$}~tSz_g znNImU?t@@%I>Y4Dd%UOJAthURUbLdy%DqgJ^JP}OPjR_U$dXLre;GV3<}64~uBTWr zbV#~AKijAC{Ce#5Z=L>jF(f$(etU`9{{9s`d&czK@wKnJbMbV=|Bs2T*K8J1Wjh=;d?+oZVl~hKj(%hnFu{&1L zwo2FLtJ_bXqiXONZU^jO4yKlKY)#AzSodT@ni_N^1bV=Ez60)L>Q=~{jagoe_vOo( zKTp}K7hItd!R(Yy20?r-+@9fVkKh;U`}(>Y;`m0Lkw5V)yhX37qzEnE zj|r!@4Ca#d;6Xfj>e1}(EP|a}*y((AuxIHlVXMU;XPbAVs%?H9$h1W)Ubx*;w@QbAQ{H@fDlL2|pjZq(<4i_t!>f9u@J<7fDgNND5wGT^V#_KMB!2GL2~ z4|d#XSsk;I=J1elH#nD%cl@52E8&hKnofLo9PZrx@VY(?g`yi)W z2(l#Y@8A7dkR~I?jh@Fm&FW&MTBGhZy-Km(+k@%R6Kq}j?t0O(+vEFLwb(tqrgyKH zfAvn-RR?w+CB6ET-|MN}x?ed77WC}*{qEvq9rueUKh(YQHdyZZt2!c6+c@pp)o~I8 zXEzK*3l0ZM9u{~xf7J8i>pja7PR*TtXoYq}U1!Q)RdklG^IF*ZoqIsr6bAP@?5*QO z4i?2WJGt9A z;qUA^TF?0LS+wR>1mW~~J|AasJ|AbVF@be^?d`Mv+|SD-OwI8nzq$VS@Ln^z>U)#< zCAf|+_&T_p!tttjzK)lq<)(SJvzn;YBWU6Fd%f&i@A^HFbtgSg^cJt1-?EO+e>gcz z@{QMzlgsoqvqW#lUc3J0Y8RaiMWG)A+tKqrvxk#w@rHQ~EyG36^~hAWbdl|;A|(LzwMs&7O5mUZquGcAbVT8**4pRyJ9`trNOosZ%0^M>eId39$p{~QQNBH$-|tj ztOS$ujjjD6^$cDPw)2%Ig3N6{e_sm^rOU4C`(2~9dVX#k@3}kftJ@5SbDxb^MUf9lTiRk(Wg zmVMVbycX2m-dFc4YMX9t_3T%5zD`Aq;@&9Ub|0g9r`Y~fxDL0|)m9xAVJGdp2mNsI z3Q6}Svq!@9R#n?pc{IE}u4W~V(RDM74A{=1B6Mz4avb$4hs=wej}a@lLU{o}si<4GnpAZ zG#O0f`P3U8pRiq>hY}C(r+eM@UjyfOq{VtQo(}Gb+o#V~?8ogQEjzJ)>~?!ux!=sI z_Pe+Atz#J>i0$%VVcFdl-J$m!w9eDmi*CtnJ>MP_Ru3QsSO3u&@MY+u(XVEwUhASfgeBo*W&|kmpVDo_j_=Uk+$J zy-z(-ZiFbg8mRb+6~20p?!-ek|Efsgb!{{*7TwOxpN7`6d~Y`|`k?%>aSyWx-Cer3 z^q^TckY z^~#v@`zkV%73K5o4V=fg=(^=|7`MXy{1A>{b#L9Axvl)UVWQJuG;tr%a57pLoBhjh z=DmS)ao*R9<5ZXo2NI3PezBXhwhlin;zj$=?so^Y70%^4f4qmwet8L>+x9~BU%S)l zP!iwe$k+YoapjhxhdIKQxk_>nydXjJpjjIF-nz5EFu5A09fR1oBvF|f_4GfAnv77q znlH!g9|elYXJ3y+@9{;`z%NI$kI%W2fBJ9eMeFV)T6EBQ`4}ecw*S5!79G)(JF~YK_fPQJE;y{-s9&xokNDG1 zpXY$C0YvTzotOCo)I3~fYoE?9Z6_8MkY0wC?9)G;ku+^l#AkGl?Jz}Cwt~>6??g&8 z^HG;7et>GrY$XSYuQ0!!;|0pr%&wRIIkCN^+>$s?e-?wY%VR!qpK}$5eQSaiWuDDE zvCc|w`?9>jm=pfyt#P16)u`30Pu!k$#D0D~z3TgYv0N;y*y+So5Zs>60uQYYH(Etz zWy@E`R-;;a!6Hh!1AjHxPozJWwp||F@ymwxy*D|`RFw|m>-)HMgI(4+uPouv7>~o3 zEZeXcf8NiRo?(Rj0LqO$Vt4c^EHCPwfEb5>t(KpJHD*&5!IL7cG4q9 z*vI9Knt8oEt5aW`i{7z!jXIYww4BbN<6YX%*6{B4`wzb+GZg#I$}C&eE<{b zzVEd-(#!g7ANt2?G0bPfYy5s$`>K4e%+XuriLuVchOFHi zKYFd8V=dp;p-Hz+Ru%(q(7&NM>(WVD#`dGNfGv@g|bB}WsO-FEF^kQm;L*pK_C 
zf1fQ?v_PN$gy*y$E$2DBcs-{z+-H7?ZWK-#gP!26k#9nK^-iKzoTLI6#Xe7iLYa6y z$pe?CfoYCVe2XU5EDZ`{K-M>7nb$K(C{ zMBZ%nTpE024*Ekio!;+xnLVG+=VoA=e;dyxi*t|pZZz0>XR^5T3fBtyo5ymxIQK++ z@Xo#U<2gGoyMv*AhvDTVizwOeTkJaS_xrH5hs zes>2gd7A`fal7e%HhH~ny2smK_loT_-`ehNeeS25<>C={AD3Z^6#dyd8$7p9e{+91 z4fpeVzaELK41yCzCTFS<%sRK~&>e0b`|-28ZeM7Tt)`1xwJEzpa?N_%-SfRETWu(U zxf5R1AZZQpHF&MIk9v@_+Nh`x^VPi?j9No-9keVpO%AX2aD>f(`cY_AILfaZ|FoX1 za?1}ECuHA@xr_(cRbIY-ya8qK;d%XDyiz8mjfok91mg3U&5jrZ_y-QORx`{;0A zuP%c_@3OuOH=9z%k6BtNf8JL6M>^|{JDuV&^LBOH8MSTWGMBwwhzeslw~zAC@$92t zBzssKqCFUEzMRtI-keAgf0whl*p%HV97ltRRIzkF*z?{a#2_C~NZGmQM?B3u;e^JQy}XnSOjWoLq2q|8;*d zT(8%4chKE9cB>jqdBCzm)t~LHW6BTJa$99Y9G+^{@6WoU-mUezO+22%DCl{;`_6hl zd)@vfLe=2796j6aW3%3Er!(~A(V%D3(cw~Vo+&$w%%a=_^}X}dIxf74yQ)pu{QYMy z8WiOokK^&KvgeS2{`;3D-UNhy56+0aOOxKu?1q?Gb5lFTPv`S8TtBtiI8y)~xozx4 zy0%7{P!zxu&6o4eX|Y~X>MJ?dpFeA6K7O%X`DXVij$8tCh<=m#U~7+$@yhAXcP4=6 znEFV3?xcGM&{Gaq0L1S$uSd^#**UGYk5MH71~2_peU;eb_l~cHa}xl6vvCdpE?c2` zy+S8{GDrZ(&C##dI*x=|->iWx05~88z$^NuLGq>nuTN)DbIBI%eOben>gCZNf?sOt z04X67XalwY62WA3dW2D4v^X%zv+`s?a%5R@_TKTEALA~6$2~6;7q{jMBoo{6#nP(c zl_g^bW$fZ+anCy@UGE)#_vkU{T2&m-G||Vrc^Y3VNZ$My8E$P-S8@INm6aw-SFt1e zyx$wuamT}a1}ecXHCUb$n4=Ww%fz96xt{v|!1d9cRf}a%1D*L*c2oueRXRj}#6Fe@LOypvP-Kf} zy+o9N9~Dkb_n@eaDj*;kAu4e|a6xL0L&;}EIIaGC$SpMDNR@Z0-&5vOmz0AIHj#BQ zFa#@OLzi<9cD|4?s{Ep`L@X_o*v;?|nx!t`X6QuPfStfz;R(1A&{&H z5h&_{v@93FB_zfa3BjhdLFS8jtL2OxJK`~j0^bxv2$&Exe@%>Xft2urH+StcvgdR( zLqHKIrN0AzMk?_kfUY=sJTh310bO9FpEsuMyR)#;!Y3f6pbCK{or4b(3I)OXa+ZFv zoE~>BmTZ4;9uV1O1S+~OI`j&n`M#m&A6uPw#fjtyylW1AnIAr%fFt>t@9NCLb;1U@j3bEeRb|OBT5-BVY)IwmoAceOiqF}yUPIKyE!YmNU;Znu~ zNrn-BETmgSNP@W{V&phrE!t&{Fe$FMqAta|m0e?NneoT@>?TAqWc!gIASVc-b#&ru zB!r7(d0+a8kA3$Z;ocSbmN53|1cZ(Sf6vNIz9!}a= z^$~ybvs=eL1^UgkW-S~dS!8Kyc>Jt6dTVUe>-Nb-Tg~no=Ri`_{Ql={!Tz}YA3gu) zkJYz<5fD~4uyzN(TkL1W4O}T~0@6XL={K)U=^n|15l;{t=6wVVgJg6T<~n~7i~2r) zmZt4i?DP4OPNxPuCqemERwXcEZlor6kkkU-oi6%#7w2Myq+TDvbbf#=A_(a3Tog3I zk~O*LNyF2)OyAwm$VVZt5lr;KGiy^A{!` z6T>6%2;tc9g81y#EORsd%^_HQfoO%2t@#7*5qDp@pGdyEYJ{=wk*$d2N3Ockw~ z`f&V}`%R8pk{Yb$k(EYGEa~sZ->mZs^JUawRkE}c@921HzC*=(y2fzjwjIHL7`%qZ z_G3gq#2iF2K>-Axb!HNY!U3VCpjZJiA>Uk{f=t7?uZ)Qh94YE;(1=r6QXL1lVoQqG zF4mYT6rF}5nRMdBkvXO!nzYYG#}hFI8SCHrJqbj4AE(@xpnO|#%R-YU3Pv%`kAQEI zASn<6N->xO1e;PtrDwQdXnDkc%P69%#2~nbRiuAAa9UUvL=>ys!dahSAuy`?=1#30 zCx9gu!S*bLJ0*0+*bL(|azF}aid5<%#vtJslu77(ShZgu^lgyw5)0~qP_C6pg;W`~ zAG?SkItR&I`Wg-#9D7J;GTg+KqvI-8XbeoiQGzGnW~ZxhD_KSZ3q(DCsiVvbh0ga6 z?pE=NYkbkT@+YPonXbiM?=1}PsHl7V!4!68ns1S$%RaAPSkQ2}^QS)l!uY1nYh2RD z{t;uu!~AHWcwwbfpi0ZBMPY$tRvkv2 zrZ${&TfYWG)L_-@7+bWm+DOKsB_o^qj%AT@Bx7Mw5l=x-(cp*>|0gjcNG%5#eqv?; z0#OTzoSv^G#lj&V`3x%38Xbm;g66tKge9HzLmdkoLr}gzFps`}hKoo|MALti;F9%uKP=?N(hok-C)i; z>V2?>9-G(Yd5)ETNs-X&8QDZci{;xe?bMGLvR4>A4nztBDGZ4yXISP3H-y`?jt^Mm zo|Tz15Nws0>zO#YH=@?7j-Tu`m@Q<4P}SP6EIL7vL{DX|h|gnW6bKaatV~XI z{1Fk%Esz#mS%RUU$Ug;9m0JiQF%KkT`X2$#hbsr@4jE;C3%xZjNhY5d;WK(1BO#fK z-*Us)V#Dh0CO*?C_Sw9BZ%@`D@}s2xIP2ZgauLJc(EMSKU#2-j%t9%%1ciy^GuuMt zT=#KQvkFD=U0@8Zgu8^vV-T#51s{Uo^`zoD4|Oi-b+JGFSoHqM+iz|gU!J$JbluTh ztx_1JV)5yJy&Ak&Y7WP0_Oq@vKYlofchtEXMJSD;CXU{0#eZtcFTVJzevRN5k06T; zk3?rTOMdke*1yykarT1PrzA1FEcz#Y816Cp)E^Qj(62@=2#?4pW{6=pFbWmleriiO zLGI)4=Y=4&Dq|!}4`F|_uOo`2LP!?Ez(kbz;*=)@|E_ku<+iCc>j#n) zA!#rEzW?DNL<ID!U0QZM&H_huPb6}nz<(%_MxSQuZ7 z3E;3YR&$x3h}6gYZ{Sd+2$5X0UruX;K;PCs419?!?Wg?Q|4*I=3`hr^)z@_=Qs@nk zb=IeUxB$Wa8E+ZN*bIrprar738P}RySZ~(!od1j^evIq6jCx*5gR8e-Wq4>riWp7^j2ZHCLR2(CkPrm?!#$NCNQns&BVq~WVKqq) zb^KlT(gHQIuoR9y4up;oPC+FU%V_@7L-r(pb+#&L-Vb$0PV>GueQqODLD9mh*A{ENb3grIYLC)Cq4)wXzCeW6k0y< 
zYKlk-g2=xf15n_!hcW8In%~@hC?2kW1Sc+bBzoL1cMjuG*2l6|q@?Y%|EJ8osjD@TRADI{kb2@$AdrF*`c~}+rKE~n zs45#n!^1Fz`S?geqy>BK>O9y3o`E}pn`-!1R=7`*I(gd`4gWHD z(^DlH?=%Mg^uPZ$7d{+Hbmt(>|HMh}U)OPAy^|Jb9;I>p&tuXkYV?=idQa1TBYd&^ z;!6IX;p)k)3;feZqAF_YGp%F9WZcvRD~%Sghcy+t>xw8e{H2IbM3$6-^X$F?o z&j^&{;;8(SuX0Njr?4#7Gv7oskReuXE3Qn6e4fv6TFi|dJBxBgWJ!rMsB=D3kgwuz zthE$K&9}-c(z<|TQ>$78)?s*mgRnqQmV{Q!2NhGI4kZ$ikL=U?gSG#=eC`u(NEJM1 z{>3KA4t%khLWC(8pFTz;{v_?6=GJ2Qkj4xoB?Jr|vo;j}71nATP!mh!7*}G}-tYx1 zzyvYvAU_Wp7*?PqW_(bS2hdXY0XLoZo8@{PhSb%0?}i$Wg^eoB_o;M$YgT!||8cDO zwEAeZKN_z0U-pNzr#Zd#W_T;(3HS%ssux`KNi6wM@ou=FiRNoSI@vpp`*_O-^6*NHVHNHk1@su@^Ps7g!5xxs_ z21mjXaMHl7^Qo=!H@BKg@ma%^ z`k>f{e=B~r(D(Si`@9w+4G-&5AGF4kHMj%x%k7g3`>gMhrb*X-Q_b4?pg+G7Z?2C? zX`t%@1j)JS_p1>$8x78}<2S7ktc{hgC=w;3wQw|Vr-aWwT+18m1C0rcAjKz;#u9-$ zK1ga|QQ9qNj@=M&vk>L~IF4b0rH@pM9-_qPR1oO8F;db-M>L$tlEZ3bc)B*js4aX( zoBHfe#|x0Ux5r3-P0CM=c3M;6Tz8J3pmd(b45wm-! zVaNp>+1-1KkJg@2zgg42@)(vLi8S~BJ+t{cze&Rhn%clHtK+EA-ZgLgBaU3bnmC~q zU)yMufTE6587r*Za#88NnvOwI$Ie)Qg!G})nOLf+|H4L*UKgy-Ig+bgr>W=hhE@3# z&Mvowu;Cbg-gZttMf-!KK8tKUZvl!hW(4GD+MH)GZ^eT3b~kfFWg(FI#iKo{4z2fm z=pjfnA49gH3s@|dgQ^UuEH(a8G>s9z<{Z6-oA&cF@?%ZkEb&W01UQG8)L!@w;5;6C znJF!4{N(GKr^)a6i5~NQ`&~DPf96z8t>qt_Z0bLMd^pzc=Z}pJqGxFJ&oR39)~~yq ztpSI}Exc5gm07a=a^-x(7;GvJ z`K@q~#ZLkhgCX%QDi8^)?8Q!dC%sOB^JJ8rjQw1(A{fuMp}|5VT;6LL;?|<7n)(xhGQ>l52XmoJzJBMOvq}S)eaW!_O{QS|Wess|0y|sq=Gjsar z4?}Krh^fqn*-bUf^V$Akj&wps^#GUeR85u4Gyl8LSp2LQQ@8CpVc`pnJuJ zn_%4+SWtYpNxRe1Ix3Pe!^&J(mE8uP+7t?mMakRGVQZ+=6bpvkfFB(5HUSZP!O?tw zFM%OAqHX*JgaT`ZQf{z7>Xt8S8wMW0qz$-$xkAmJ#+`tf%^ZS+<_qe)G@7P2Tff&yLEEZ-vf4LZ>RaDu4$6G z3oF?;=LNQuVs!cF`+l6pt3u)Kb*A%wjfIsJfVrIE)SCp?frxtXT=?Eins)5EydGmvm+>$s#k*F;HlL*MLw;nVAT zGhnH-one&fBtG!hWm-PZqwU-bg*L+ zca=CInH*`Ayqj1}OY-D21eyAOYbekZozR53cMgmo(QsC3<;l%WLC>>}aUlN0h}{AI z+57NM?^Az!pII{_!;Ow0lKxDm^=FP|=|1kQ)X=MQ4*o;$A9fH)89|{dYcp@uN?Ed*IJp^}~bMcfRd~I&PceK69{7E%>~Th@A#g-jM;H z*g5~gmLL5@C0~uT$%8#q!g8}jYbjm+=DY1;#4yB#Tg#1;(rKeR32GBz5K7d23Z~1C zhC@1QYmWM{1@Y8oCppFP=po@^$Qc&7KP5gz=?>2o6X&IPaABu^j)?VOHDQ^&fvA@D zF|`6;9*NQ%SK+A|WxLE62-I%9l#0Pmd?X!nxy&sP#*B($3MF777_lPyVyeJiZAK)Z zm7V6%?8IX`pa((KocP=&PwLsB=nFYQ{8-Mg#aolws5&x7L`2d_hvR!yjHfaZaAJL$ zCffEb(G)ezLoZvFw61z z0Us5bSw_=eY_%4$E5|rhA>0%K$zABAO9YZ25zj@PACT@99wXb5#h==u_DmoU#S%%H zf^S;0+MHR4TDW3#Y2_4k#76VuuB7MoqbU8Y;etvWLKqr`kwa6dbp_FG8MxAVqLsr= zVN2@@(m9oXD>C|z^u&)xq58}VzB6Ra!@e_U_C%jP!>2f9Bi& z@HBjKH?n-O_~dZBvg-Ka5ux+X@Q%W8gQWWT9$$@O3;pYmefp2JtEBav^F6tUV;3(Z zZTOmywxl+K2qRz!Nf8HziV;MCgBy>DE1mT%LtuS>ybK(pTK9*N8$g{+)05~mpsTIO z-?~SExYVGF2{d~01i_!PfW*eI^*Yd>hayKfn=srT9B>GIQ0%KEYt1^QKU&g^aCUYJ zmh?KCMH)YzNLG_9-dQrXwN{Dc$4fL5h5#vM$=kd$63n!=vPFvU?g;zK{OLsSo}|UD zsXtPGtPn7Q)kyox2momh)HG`RWda2{@9=%MUgRCBH5Q$7?91VPY5m_>VZ+(p;xp3? 
zwXc9(e)pM;?s(K|t&a!LxyFBV2kh4I$|IV$i)G_mNt2%U78l5x+0xfr#u-)xJ@w?V z(G7*Mp+ey$9br2k1xT1724*1@&EdSrL5}o)ZG?g5R5`77bMC1{W#6_%%R$w%Aav(o zC@~yNho(HbNvj4Kttg2(6vb56?_X;s*bIy0_1+3HFD>b$a%(G&u+JM064i31U^%qA z3lKSmAlje9B!wWKao?O1#W;lsw|9It4NNozt2>d=8dN!=X`ykCwOb`=bbLF8y}$u~ zxJ83|r|Zn#cRLqbsyyYkk-G z(y)XzAkRufBiS~X%LXQrlO}u~+II!@hAf}`O$5kl*P17Nr-bk*#}dDouoyL6LI7#j%f6^j-gAhYv%mU5*fc28OY6 zNN;S|SYvb$Ax|1NEOjGCF{qKFJ%Lz87(?Vu&Q5DAVhRB`umqP2j*v>`@YOXmH?w5X z?K+BrZW&rIPHGb`)e#Dea> zGPm)NCf#4Tu4m4TFC+=RJ;CCazv=5Y)mrhVcT1cfzi<$mB~QF~KsKfQT%(-qr|;^}J(pBu70@#lDqN7t8*2BgF)(WIOk- zhk2D%Oln*zCddsIAfHPp#C#qt^AEx;nuBDG9 zvj~o>Ko#x`=fpztiWGH!snR-S6kjDpOUNE(Ew z-uQx&4HC(kH12u`KmPGAe-LP>`NMv-yRI_fX#M~J`*Y^_TkHOR_=xn(;>UaNuU@5h z{GLnA^^?vQtA6|7jvv3tr`9BqRq?Nept(eBHStvQ&`Klw+p~WB<_-U7_)6&2e8rD3 z8cR9VJRqco<^dB_QAy8>sm?8^@-?_bJl;V$0uya;TmUYHyqA7EZ%I5@8AM z6f7YKsU?VGYXk;=+CBKE7X;p)0srXM@*v^JLhuWm>9qp!wHyQPW4XZ2!Y}J zwK1G=NF}QzL(w~;42g~I^W5f`Z@G?0It@5`Tm|gL4yrIlBWLuV8E9r zHr_I%Vn8bS%$D@AVWZb!w=R+708b7qF;#zcWn*2J_3IBz_d2Lu2m*`sl0?@BR;!kf#nypHd(Qlcvg;RI&z4yB zZiK(}fDHqGO_3_M{);mt4*qAp#2g9&ueDAsE0_CX9(6cH`W%YbgLXG)IGudtekxHe zVr2P3_due(F%XlqLu%&gxx!G-4MGz`Ms1wJZ(TJlwBEl5hikt-`^=H7(SGH>Vr;HH z=UdN$8lRTN)1P|%#ofgIy-y?Q&4KIw^*5IOXmNyp{p5>&?l-?kzy7Z3!_WTNLqkQs znydC`2wV3|s1l9-MjMT$;p5>~3v$qh%P$~^*HpR`SB8W1{8KPSMKZ-QoP4-IzHaz> zl{;9L_yR)v^l+jvPvs2DnA-HT2U#xNF0Z3S&ZsOIQ8BeUiwDR@Yl1aqYs|WC5Nvx^ z8dH#ehBudM840?umhqfO8c^*sJjFBJl!1y{bD8Y5XQ1iTd_Li|yzIOEVJK-$AZcnc zC`me=cR_x>z!ja)PoCOX85VT%2Q1ew>_-z2!uxW087761CNmQ?bW<3u=zqrg;>ZS?!+9a z@!VG4?hM>Q5R2dxZ@o*W1=_EmV@q-e3)3fD_gvrk>l(r{-fH`W#mR_CP5dB|bJdaP3CBPOK3IeSDoi z^ObK-|Bt;=luf6A=&wX3|XD^WUwlnqZD?|SE8rYs46ct`=I@~uWNW`1* zWLX}1utqY90y5z>T~>3F(Rg)&WX8oQ1o=Jmni8j7fxm_?5cV%Nk6nCbr`5J2YugK7zB(o-UNl%E?Kx5n?i2{(tK*lFLY#VkFro-WFE8SP zX2t}`$hCvgMOr7OoULSe(>{}BrUe@Ivg zy@OEu{>mJy#`kA@bl1ka@YxrnYldiI?&66?Bl?*Yh(BZCCl)_7CRzQ&8dHC8gr9i! zh4z$PJ{}dW{b=W}zv*XxpP|ODUo)dfMSR_b(6tHzLQAg01mc7vk)RXK zm6yxo97;TWM{TPzV;spl@|Dl3(w^eZ4oTrrS#atL8-(rS+=>ur&-HdiQlBxATaePN z^OlUgles0;4(@V>veCrCBaIu@T1l4$bg9>PTAVmWht?=6A{MZJafD#CIF2o%VoVI- zpxlB<7HzFcRv?+PRtbZE%1}^?L4@FCVjuClMbS%gM<`T=ovI+@s*e)TbK){=8eF5FM3zBLLp>_$c5o&V!$TE>3 z%rh?xyIu9ae6IBSAK|KZlFuk$9p`)nqIv%xJZUh0f#K+f!+!J#{43Z)tIy_Llmy{808yYeE-}{Jt%^i;Fxx{~jv~9W)6eJgeo)m0`abUl;J8|nq7@iC zrSl6IHV$BdWqy&M#O%!l>6opbbBO#6)BD?Jht?Aw46g*-0AQtKho()RJAh+c^BR8f z@FqupNi&1}-q$)_WN+SX7)eSjt{o#GC)}eXy?=Ao<{l`lV_1etR*0*oQOd*Gjs9I>sb0Y%FBqM)B&Q%(az? 
z2^b!5?H5f&?P#B71rb7M>(#MuA+3$u=IUgutvYs5ZV{)7_9(Di3_P%;-pp%1=02Z< zRPXhTwpMM<_adI!Qn^s8o9GFSMQmx01rmv)_u{z6WW>VAJcUV$vbbZluH1vFc+AE)<-sPt=E5N>6$YKADtsP!>_tHnjjp_&~mh~LEyWf zn8^Z9C6oANwvZAoQUb#k^gQ}VA#Og0&)XfyO8k)Ms%QJ`x939do7Wo49R9#>dWL5dcv>n^6e3IAK1)KHSgo~&mMDFoBtNW6J-JR zSzx2Mz@nTqd;O}vYQ+wgk1MEzW94;|3aIw5p!dV^3`THZ1|a0rA#?^R_VhyW079IN zP83yrA4aR0VzWqg9Vyk8f|@~+AWN`73;ReK`BW(f*`wmP(?ZdE-~_ z?EN`w{-ujmP2FAm!QFo_>(^espSkBBd$)ep`llxU;z&rXfj~r<*6vQHwcZ{IwV>#*w1QU#w^HmBf*-?Ee<%Cjw zd5dC?k?FWn`}-0tCu%}+Fv!OER+gKeDvPTqOvPf|Stqx#wkdr+)@S%>EO8wY_O zve6?0?IojOb1ej4dqkSOPc`+~$efL!;8-OXkBLvDiP9IKFfAMi69Hm4-azDnNH$2( zgT}med)glAd7gFwD*)@S^Ul9}tdirey>NO>)8yl8sy@9Dt?18O?6b#SFaav6xQwmA zGe=Op0>d|UNB~hluD`M=1>K88f9$=ZI|p=MSgw&YsrE&I3|sw5Ytxa1@#qa=y6g%u zBZAsaO@nC)OQfWSQ!&xLOedpmvqwU4ty}5d1vk9r)thr zy&vGq%NxA-_4m@?S4}nk@OCwO*kN7tEhH$`d*%h#o?z?*j3H;;r3eZSf0(_ougO}C zc36*GMeOOEwB)+nO4)&;cvC34*NL?kK6_%72Pn%kCt#M|r;Sp6@$^h3F>Qju_p*|Z z3RYPA*`%rYaIZa%@m}u*!a60X)(jhu20}l3)soMi)PVn7=iuW5`1;+gD#m*ZYwb}7 zSu`FXQYCtSlY@AB?)Zicf3g&~*^7DH^o|N=s5m0%@XYsx6H4WTiR4m+v&ux!76_%I zlxC)|^j>#SH2crVhreIitpfXUih$(0!s>mLOMR`CX6F5SkM>`CyuX~g_s87s*Y!PE z-Sp~bFGvW!$AY?LIIoIZ5L99<4FsZdj15(~@B@Flp~qCm(OAzT*4~V{dN1OZlIv{^~ce39cPOsBjBrX-^3dz#4>f8b49>PjC z{KKZ>gVuN^eqv$8e-lS=hJr^#WIuupogv9=ta5iwsQbA-NFHDjVUUr_L*;63x}JBC zrFdbTjGCvDLVy4CkMIARpQqwyCjQ6Xs)px$efwX0ufLdnjEpzX#Ma(%{k_yUn*6`4 zeOp`dI@k3N;-E!Q>fub9w&@Pi4N%UoND2k2Rt@m$ziXxifAxOWTJN*@4%WI=K+|ky z=4OmBvGzpN8r0uEk9(>Ucovu|iAFnut11qB%ZdX|ps+ zp#n8cB1R=+^iC)Wibn??5@0?__#nbvM+AX{h0ahdKt!(If3}3FnkZa9Os&QjanG}^ z?to{9pg%3k>bgErj~(-5)bZcD8pgI?+j}mq>s=scf9<}jUCIfpqPswiux=bs$-C#5 z?5xS?CpjS^e=VS31einj)*BJ|#D#Jlp6o$;Def^3NX+PPi zJyO9AkhTm2-qqP)xEZf#T0XP$=sAP^6FIlfh}nYk>sc@3Tqqq~-b=d88voN3S!yJ?cf19y^ zek_RZbr_n3G1*z$>5k1k(qcVnXFQemyW$2=6T$odNwMhfuAk?mVqGqG$j>7?&cwB5 zKl`yJdoFuzhpyJt)eO#Tw^E$CVi>x~b*)CPM43~P#gNysYMew+e16qEtMU8+lWlVj zoq7+#N~cPaX1>Jwh@dqIt~SB=f3hZQY7sk4-1za8W!oXfXx;NJmLRhFk*emjJhvlT zXouqgoXCmr++Y#A)2GnqMAx`3Xy5r2H@LX+YCD{Zwr$6#^Fds4jyjvs<=u{b42Hhr zw#Sj)FfIJ7F)0gvE|30he+=k|xfkheXn7>!RWFr+^kTbe;Q6`dEB5= ztHh@V#hbW`+E9w;39MNngu}Pd-e)GGvSxb`%t6%wVP@^8(UyqBNxIK29F@0&-NP%b zqXDJz2#5vGh8f+Jj{$1qJhUq}D^42MvMn2GKiG6v2GetCt<zCJXvujfYB5P#c&-lxU9 z62Jd(p02f{KKESQ|2U8MJzey^Lx@B<#Yt4*p(qQ)!3T422?#>O>n4!7V^`AiQcD=` z(Os;t&L|^DRp>%Xjdmo$lj*r&dW61iZ1p4qAv`;go@axA+{F8xe|*6Ia39yV&pk0E zuvQ!{Z4}Cq=K}K}qITGBq?qHNpj2o70mv7Gsouw3d~ojLG|#!T%Bd`hxdYbM{aM1U z_w=s!oA1PIbaYOo@X{jK!7nW@`a`#i{-NVV|61G2U+G#ZYBE*Qg)cJG2S@xoQ$SEhgu zMpm|E!>KU&xriYxE+Je7#&m4;;}nqK{f^^jVUWq4_9qFVe>5f|C{386GLz10mxo^( zT(b0Tx>_Ai#r@XSD-XJsqR&L1E4}f65obLab$%yfubx=@w0K#)j$)|TWh#|JMIMFu zXih=8s<(-G!utJcU&>ZQr`mN?fw|8t zIh(?sS?@%ae=!r%^DcQv?_L*T$T=hV$NQaC={y(WfSyWn5%7ff;uVh?cX3=-&(r#j z#m#5F=?HFhPNa{S${)3hUXK#jwvJ{XFX29ct#hna&s9X|#qpSIm_3gaQ3a=DISEDg z{L>xQO53~nL!LW1f6P2s*c#(pQcpDb@uiHQa;A&de^E#;`GzQ&*rIfUp%pVa^q9&W zs)_}v>I;L9ReE3NP}SAWX+EQ*aS1Bd+B@T2gef#l=Vsda85aGzE`De&+;V*1+KD>u zyN`eB`t^E<+8?;qK2O5y&nkZI3-CYboIGfyEt`|CS{iK4qb>I08ABfVG!5tj&oizT zN93Kcf3RR>Oq69q6%Bb!(tJe5Zznjcs@4OQKU0Odxz=%Hl*J$>P+1KD&e*vO?bMo6 z5HI!2nuPFR=I5gD!CLR<(woS2QJsYcGRN43(UbL-&zEZ@Bh8Guok)N=DE*B_rAQ3 zIqfbNo$DM_k&NF|J;^ejgN&v`4ebSDc*$h6snnwMn+fI*pJ(9PY5N+e!VR{gz}`aW zk5#2Van1u^Iiu}8`am7lsrd?J?Sr)LN@`xp$0+Ljerg$)5m{qh zm*{vciB5ga;bw2jn;rZj!VBtl=AgfuH7ym~ab>Q#W*=}0eW_gMR0htr_KhYSNaVfa z9}@6tTt@H!x4&~AotV(Nf~X;Rjdd|cf3_>{&Z5jg<|`t&AKFa?l5PB5OW)*WkWC*V zjjaI3fW^oUk|JJLO##GuhycZ~Wddt_~z1ZJ}=#zAI>%_#Df5iN0 zP8}%U(o;SM!K=wu`S6B$;%tx_50TB27! 
zBYXd)`r#9iE`BaBp1b-p@|qLt&J8wQW!~6gjEH)t5Fu3MrfgtryM- zmVsb$YtLhy{ePKBQ5U$K(x?PB#uY;?qZMC|Tv@8fjzvA40^Yf*Od)#(tX z?C5Vd-#G?_Eh}?|q6^0HYp{Xum}#jvT}nT3PUUu*qPDL1NE5bjRKcM#e~)zY@Ms(x zN)Zc=^9IeED}ouh#faURxyFR#5DicUZVY}NC9MmJKhH8w?J8pNxeY+(v5h|7d1Q8Z zrj79~CJO4Zt4Aci*760aeE!EeJ3Q%ZAn#%b?F;BVdg*)u(#4Q=)2$(n-;W5Uz9LF1 z^NR}Cg`!|ojt4Ja4`WV7f36GclbOXkRj<#1;C|rb0X=`X>^!25Dqc9aLn_;VC>}&P zW0>b9$&y5(L5-W3C?+G(m~ioPR~GKX%^4duCXvNzW^+4+=EU)NovAUE;Rl|x7`1N& z@&L-@npcdo{I0j(J-=IH^Yd#Q#RTHCOS`u*$Ez+0h*3ag>(gyLf94>_GUt@glkmi7 zom119x-B8eXdKAKr&z}mjXAAyhse^I?nKb45wS%S(X@PFq+Ia%$RT<2&Ir7x^7R<# zb7VGjb1`kfCQt}ciTdQqM$fCeWBy;QC&Rp9+Hu#q9AVF6z-R0*OuJs^vhvfP`xmje z;;zpNSDcoISXnmRf5Ho?Bnhk74OAN}sH8NlW>mKCNfmuURl45R9=sS2V_P*0ih@+K zR#-d3ae6lR#_@gdB1Hfzh-eBYx)XM^7b_x*$3ls0zC&KYYaYqSq&p@-lreGv=*}Sy z>LH4niHrpxOPxo{4Ql|OCf3n#Pxst!cFk$u^)?fKes}%3f6hmV!t2(r)1dlgV+Tb@ zr3~p#3{WGEB>5nQh7hey(7a7fBM0WKMbhE#HKs5QPH27D$DO4S z-$&oMU(6fse{xf)XyK@Krtbpf82a9uK14>{MhoO6J?G>@oYuU1-#mchKyDrh0YwJ5 zerGlcp{68h9H@jBPg=ZxE(PkGr{Z3}hFl<>m)aPXoG64Uy8vH0&bFh=jpKaEyFK+| zzd@E)TEi87vbQ@PN(=jYkPo<4*|eZ5bBe{59!>WHtW1l%D!i_X?|_if|I zcHyWfNoY#}cnDSQWU2BT;An^_P1T5R9!uwHOH+041EX|DCb~7Ygpu)N;X2Gv$7+EO zgSEwT^gYJyjn941we%Yn?PuJ7#hnz$|G(TcjuDr9iCzG8DzABkZPCSuG}?XZ7MGVu zdf7yVe+k`1JCcZys3h2(n@=hWmmXH@Q06W@35Q>z05Y-Rz+F3u9uW#2B#DkQCG*O{zQs)Gid!N)DAz zpk_A9?`ZuIgH#zb$XP05tm6v@`Gz^_8pgzYf0_`;BXutJ8F22WU_*t*OY?EIIKL;! z=h(-g6clrtPJ9s>mw%n(P~I?PeO^W9zpi-�P%Y78Bt*qRgYKF08JRjOeXK*VpiL z&gbg$^mE;4w{`(&4%A3^89^j*maOC6qwfL5Z~UPfBa zf7g+D85e==C^IHta$B(WQ${STl!M$zEC5w0%c*K}UbF)O$V(|4&z@6xk_!S+OyQI; zaU6`&=!wc^$mRZit(=rD*7qRV*!Ag@rhJI6(PpIiHWL;_1W-e;@b`6*5s8U`BFlX6 z5w}34h(yjwM(bmDFGXahb)v*bD1~(^e@f(Go>Y;mMUJXCs-bp~%ShlvgmmIe2u_v6 z-2bDSMm3Rz^c3<)B<1IQ2tsZ+6fW=c)puCes&bT-oN0S)clEiRe}_7MSDZ;PcH9wIT??Aqb)cfE^kUrvz?ihs>4fWu_TW zCGEtNq(4VKf$kiku8P|v&lKyuaB^b#8fJpy`Ut%S$_%Msm!)J5xt^QT==-=dAZ#bsgo>f8(6g$R?+}LY{hWYr5K5XN!8jx2&)>intO}>s0dv ztHhSeWO0J8=}d_&uSDu7*6D$6%?Tv-(YyGoCvfbnLbX9#`3dE&7qbemaT`m zSVD^?Qarpr*4}2qL)b-)FS)D3Fj0m!G(mPj&BFri2ql*x;$yq`{DIjAe}&I$VSXI( zF`X`4f5+Y8mEKN7moB&6;c2)&=TjLC|M1uQdon*vK+y--PbXqGxSx*8KBqQ^1aTiZ zbxu6z+sK|-BD@Sso1b~kDvXO!mv{VTd^X*klnUbO`8W=CUyl=e+8{UcDMJjAG()0w zmn8GI@&fYfm{Yo!Z z812>~5H*MGs5NC5+rY{>3Y0iJ@N4~trmu+8^5aZ60Z4`Mn-iQ(e}|+|FcD1~GIK{n zPOuQ&*Go~uR5xTIlL&DDUzl&5{(rz@5L?{yq`)&!qy52r4lhlQCTybfj7j-QR74l1 zRp8zsRW)5GZj*Dpz#NN5Oq)XfK7-L@22N&6rfg*lOmKgGUD>igp0iIgb-H34n5?z7 zJI$CyMU8rKl)$hKf0ZmKeG;xLxeL6 zH5X}pn99Zm)jT^%MCjeNT%o@!D|bS8W^YSgC4w*_pQ(%pk-W^(HPA!5a4U&YkOn!m z>xAWAoP!a|lEjh?$+;ae#J8b;jQ&tG0F9@N4qB70t1sa9l`T1H{9IhYcikO|zLK0&g-DoS zj1m3E$E+>Dww&l zq^a2wn{c`nXeTy3E-_fqr284}1F~Ejn0IyM%f!pAe}qZy?C-Jv2HP0RhIpgVIyu1E$k1<=j{VY>Xkd20luCC9$UoGB;`ag z(q8t-e>1BstqrZ(1(lAKZEIiL``#oO%@DV3Z{;wl3Xvc431R@AU-vQM1L-OEedI%- zw$6UJn!B$e*dgvjOeRWFn;|dHL_VmSJFiP}n&0^<@urU8Q;;IK6l79y=d)fj+y1Dh zJJbo4E=l;pQ#@YpBcbmV7ei;yhG(2-^vc8yf0xU9-Gm!%^1F|V&Kdf<{I2KiYTI(t zMa&Aa+KIC*M-poS6i}^-=wnR5|u40sCL=s;Ykh*K4IH6K#JWW2L% zQZ=59xrd6SAUJ9ILp)6O#7|)?Kb1xY+UUff+tHwJ#sMt2fA&$XlQ9qh#ED$&s z<531*u(E*Z`%d&aDsa;x@XrPK=VeJzf7?lwGz7+MDQyDNClm3e%a&uA);MscHDnPp zvck|9{cxO=Fom^*5ankPUq^fW)N|?HG`N53A}yTDHLm@^y+TaI_E)WB*I_O68KPz- zPxDHxY={=@^dTc=4RTUEFHLcopGN)9Ppe2Y6ECk%(Ou$un+KL1%81p?7paE9fASI? 
zr&67rt-1BiB8U^Y4YSx@*<8hW`N1ozrn|i0TQi_ZL`lZ)(tKFsJ+d;}+B>T!3eJ04 z*LMNdq&ge`z-voC1T3%k!v1-o#|q*zBF>JGl+EF_nHtYZfGp_*3cR0U0*<@Qy@7?C zTG7T1*5Q>&iEQW=wRceeS2*HWf4>jTn7hFIyJ58Wch3SO>bL@B9gi?&oqdw@O2N2@)Cn<5@884eH=639_*wGGUwQH%y?H0h}Q`d-V*oUfXFBBzIoXe z4Hkzz8@c2%K#l8=LgA3jBM`yj!ZRnh7xTLr_qkPrXu3RN1(E|e-GE(#)y(0 zFWzH$y{mn|y9nrayzXn;T5M5>N^=UNzw=a{QdrwDV#Y;8_&MroOhx4DzE+$243wpF zhc-Bi=ac2a*JBFv5$2`lP|2wStpjJi()ndbiZ3iEAv_^R?y2UurdlIfNQcOIs~l%$ zj|KgpuX-QM4dWenMBbI|f8IGqY*d#+bSzhvfeo$ah!J_MfY@ITOO3A|Tnr-FD}0V1 zz+^?|V_{wx_sh?_=Kg9koTU8CLwT?~TQojo*nG!AEoF}Sb|@1}MT4=QIgzDL6}Lq_ zeYcfKJYdhEYL_#GbaVDM!>H%T@A=HyaJNsg}*pE_ceRQvpH&KKn&N2e6OEUNPx?=&|xrf&a zzNclw7*m3tF`im3o}IDOVm?=1QNl9alk=*LICql#XC6n#sO-nMSb{w)f8gSG`S3^H zMr)L_Ox$fHY7Q1Of4Y;m@lS2UALrUGrn%-1|J8et(}6;okEvwz5sHvZ$3x>QH&@}^ zlYY+h;J#jz1(HP+xJ%4+Xnaoh0&-VlXcUWLcRTnJA|x_@t=>v&f;@VVp0j)PAsO$n zi7Swe@h$@!)+JB@=s@)M4(=0yc|g14N8R*^_CLPf`lvvCe`*STj?R3np7~iUT^dPP zs;zsfx43g)*{=9DI`i{%5Yp(`OhAXQFEmmtT4T zou843Z{3eyf4GgFbNzis-p?2?D&>oJsV^Z>UqRlyIs#*sC`fDIPEv61(wJd#eNEzP z;hXvJL4n!i3=OZ?YMNLt^wik2dJR@ELS26u&?s?t=EjrFGw3GS$m^jFd~out;9c_% zF(0FA-W~7zo9}enzkSa;U03V}w;`N03=l`ywA$!i94u3=WYStNe3%s?*?7C#=Kk-XnwF zlwOW{?Evg{_Aoz6y1eN>%)7=HUSkrCg^SO<_Cm)YyZRD$zqs_5f^t>Z6j~dS=)iJm zGZNY1e|aASN$wLB*|dHIO7c0i*D*zn3so@G3uNaaNg)zUMZeYz%zE(a{cC;Z93t5w zOp-aMHiD{o@=l}Bl~LfwAqSIvh#Rq`rNNBhp-!LD2(UG;S|<6l*pmk^O$g-RuN;Q z^q1QrbqG6szt;1T?jmZ+AOSB2Abk6+iFM<8pX1Aj{qmQpJdb*Pj%)ndKO^pkYb=X% zf294GK5o0lrf$y%z3=tY?>l`L?VH`jSV!qJ7QXRV!w3n13qjf5%jJthMgi;!?m!z; zh%gq5LFB`5n+P8)0#Z%Viwc~oo+DN{OwaSZ91%8se413_=|!27yN~9Bo^-xh*lB&t z>e1AmeXQ)AniyY&05Kx`0w+>8?_nxEe|F)=P!>d;3?#CMO>so6eyJ^e*n6u&sc2)eu!}U3p5$*NQ{_?e7fBiXs zY8ZDmjjF^+{jHVQmt?-!G0#pqz#mVgC6=GzBelQ1vN>ocM|9+9@yWIu;aI5M{!FG+z^9&WwHZS3l1| z$+G_)Di*ijf%%z}@id}$C^l|0e=NE8_WYOa`+oi(Hn7h;ezwL)&2mhk&3~Ck96O0DYseR^YzDc&I-P!HONTAO=Vl=DY9m1#$;ysf7 zQ%GyK->;k!#cXE{|M_R1pSE~rAn+Kl3DjaUnK41dC=9DSa79hr#4^2Pe-t^}6WBr$ z7o52Jxp_yc=^LVXC#;#!^Jqq^+J~gx#Ke5DNzn>0b+4f;X*eYHI{tb@T{El$AP*LF zypI3-;3(d=LA@a$y?#iivG-kH{bPQ~IXXCKSAMU{b+6-j+5K4G9rC2!)p|P3)>HCk zO{rXN3ykr0W$NzAHN)7Xe-wg6A_iI9v#K3>71sQRyFwO${b+?rWmZ{gCS+wzVAjUu zQaNdcahSrKNQw1E!YrXj}fyXamT2HZQx$nvf$id%Xe|#99_$oe`8TOex z5oCR$UVLQZon%Yr*%sv}$!n7+Ga@2#Sq&wna>-B3BhS&4pkBNxnJr2WFO$i1kYj$O z5a?eDYzd3!3DAeU=__X4zScOSw3uPerQ@`fl`m04IqG9ePDENrk%|qUjx$^OPqrEl z)Yq6Y0(1im{eh`be?$`y^?@^1duJfVZnP&CbK`qfOjH?DCCQ&-9oqeS(3u0nIH>Ru z_B9aHQz2G|!`O*C-9uo@7hPTn_gY^4IkzMeKe(>wuUp#hjbzc??_XnTw1`ggt-dDz zB-MzfTmI}MmJ2ER)12wgY$q;`&lW8QF~8Ufu`rQ`8jEC{e?Rl2NeQSWKS$cBv%dx_ z_-g3pU{&BwXj{c6RuvRpOhHu@f-tkNLRI3r)2y0@r;XqY??2p>2-KyTY zRWxNJT7eX2eintB{^K*OFjjCX%R!+r@8>?BTfjSiI{Th)?2DlD4&LofI(In~tZ8&U zM8RTO?nosTf0*;eIj3?tdJ$#e!usI_ia?LIvK2YWf*2rR#XhpL`H?BT$NMu{ud@oZ z$cwQ1ndQ)AIPpa7$OEjsIjlPojefizT2Et%L7mR9UVY$g--})r*L>O4M|S>cAHjwX z!gKWOkQ2;nH_xikJE|U*Wu1e?TnYPh!B1WCU{VKe%&f#rxy zzec$&{eeEmoBkY&>pA8Vc;SwX_!i}G{5*wl@QJawP+lNdW_*t5yVCi$kOI?o^oBpd zJ42Doe;6|QA;S|MW?%HUq%Nk#`r9$2N+b|!gs<9^Mo%T2a9P@2_{bhN^4Epd2+6j>LcJzgMELaccXFtmNJLhXTY2L(WYBbF!YGqJR_tSSz@P)`be=Cq}f0k2S zO2QsRMRjMWja5VIyYq4dqR#1#ols_{n;ZU(MSG|Tl}$`8>4BXwZJljYag&4^T#-XAKqb^UVNe;)OEI8!4M z!5y^d@9R3%A|_q`POZ~_jgvb2aErTs>p%Tm53%j=>DWUF`IK0{5cB$BJOf#Jpqw)Z z-vHE8JfMDDDN<1gBK>M{R+9+CqIruDbMDd2qXqR7mRSW+TV!U2GB5qN=f;JsAWO}N z%%5LvxZ(Ht3*`Yu@4suk7C^b>^57aDLr66*6E_f1GD$MqzJGcX>(Q z<@4Yyog;`mCt~smBD6Xkw<3fm;SdseTw*WA!kN%`QPGnlMMR-s@LIuKu<;bRFv2xZf1I1jh7-Z#U{!S9K% zIFZqp=chU3J_A_@o5c@s91b$pcK68#`c2Ue_3{_GF2LCz_HY81sM8;*erd0L>*s5% z^G%23USn5&>WSg+6E|J&THi-$-!odWBldNU57-+a3a5E(6IpwL_XMh_b* znw5J(^%m|DW)L 
z0}}oR{wZ=-Zuw@^3HZ)!+-#!zFFCu;X8Z>}5TDF!e>0$`C~|j&^mawzpPPcE^$2sE z_K;bm;^*KD=eUx`ppr?Dp=Nft0%-~&*>RhXhN8fne4awH^|5a<)!O45OPpGs-I)Q% z$n~n`IaH&9$YB(oBbm*u2$D7nQ)}Wv5vP}^(s`h%^^AFidw>P7qRRvAqBAyP@8&Ic z__eP~e^%~&8T}7)^>6q%%KUHV$p0tbX8s?~4OVq72B`1_-fHkVUjDwn2d~(U`bV7= zZQY!7c#dukul^$afg#=I2cH&4UxqJ;OEMOPN2Q!VgE5&3VLpS!$LZlH*n^ZTH1bhM zA);G}`RvWpTIxPYWckt%y1XBpMx4<1+_PUSe?|axv!6e0Q6et#;GE+m%sws=FgI3Z zJXXSWWWmOmOI8K(_si|%K2jix&Q7U_eyHG8^JUOZ^;!v7xNOB&2Hb%#_9_;Fd;jT( z_kIGxP3uUBcu?Zr)^D%K`nd5Pd+XhNgKKZNeH2r|Y?@YEe@GfD zM81QHVphgqLsDQLB-H0@!n{|rwB#V`h)MaCj-S?~Di1FdJY)G^-W%_*pcOoYcV&V7sK}}C zbnkAr{F(|Yg41j|an#WT&Y6AsaHkKstE%a-M*Ec8=x$b`LkG1+K)=_*{+)uqcH# zr}QcO8k(-V^+#MFC%*_l%HwR~gb6frV^#c@_l-BD3VXlJj4%Qvpci4iss+iZX9FjYtPdTkMLIdw&2S+Dc&Xs2^+qiuX}YpnjMzdj|lHP z;lC_Iu{$KpCZeVUWXO}^TtH*mDrbw?OgO5@Y{WmwViJcUULJBrf2`4z3U*qhr=-OE z0rp0bC57tQKZEd)_XBeN_#xS3GGYk@fjHi6Ry6}(2mz}0nO%<;6z_Y)B1zk) z{dws5%8~5i-|Y9k#Gap5U%JyszV^%8UhSV{_x8TW>D(|h3*Afa+jqw}QumdOcHet& z=U9n;PSpQ=HmN=0e;>$a040SD#r_^Fhmc7sPDQ1Ax*@MYmCck#OBxlFK;zlc^QQSa zrd2^~)neZ<3MX!#OUZ~*0v3#r^kd1X2&qIKKwdRj!zk3upGB)|%Tbq+QNpay%(p~Z zq{9BjdTztP3*zyWB+%u(@;*+ffz|pPJG*OMtGMD2aUU6Qe?5ygJOcI=z+B6%cDBQQ z;F^1R<7$?F#*sTnd%Q+)EJcTN>22TLFRWgb|Ec?o^ZJi-=We;jL)s)QRdj{esZaP z$AGqcc=ApbS?otsdtM!jQ+ak}vkT#++$;AU_77`F{Xz`O1@ZxMDCmMT9)WrS@64u{ zJ3RPHtVK-bVjFIw#mrL)kxrDM{w`uW^Y1tixeuv{{c^G9Qj@V=jiv&*Kc<7alcOiZ zrtsrrf2LLmc`afkbbfp`wPt8PHvpgDT;m(vBlsNcZu5H6DEr+@`g=cLwQAT^OY~PR zv%J2q`h}(Jw5yK(KCMc{GVffTUB8{$%R$2g^YZy92El?1Ydndlx35-gKOENNZoXtH z&uqoK=pq>6DKXc`xAEu&%&GC2Um_(Cn-jr_e`Wmc;nNn*YW;xvK*;y6H69p}`G}AiJEE*s;IFBM`%OVj`5J$qWlUKVicqcD&KgD`B*Ox&xp}eT=$4cfl$%q6CDu@ z;izD#Xnd`k-)!SekGRX5O)d@M0&YFu`kw69{y;~+(siLfc*i>Dc6`t<&zyHhV7EWGPm!N5U&Htul@3jVqcW$E zG&{_V0QLY9eC-{bvn`Cl;9Tx(%&Vlle7RRK98d$71)HH+B-MH?rM&V|9SWne*o_3M zA~8L%(pH7;Jxn)dYm4$2OO3rlJh#jBfBd&GIk$~1Tc2!83B4YDZWs&P->%YDJVUIP zw#NzU`EA7!h7q&DXSOn)hQQw$7*C%HxkWs*L1-;X8?e5_Lr8RP6vWh6K-*k@Z`#$k zG>59g`J=Xa4ih+-9tG(c%V@&DPmPr#1JqVm?@vS2(nc*v;h9~1_h`IJ?{3&f+Ul#9Es42+t z%Vf_Vr?h;Z03R~JrlS5BqCQk%f9|JD3Ez3}W%qskXgq(mFY{&UBx~~S?GJgCFN8q8 zBmUkda;3uGi)Ymelv>$rxd|?E9ME<&McnhB^T>;9-R~#g`!Dp#bf3j`Ww^D{o| z@&v>aP4Dx0N|w7cw=x3tf1*A>_kH&M^!a#tSnm1EG2Ppm!eS&Gi0A&cIQR+n31taY z%|>XBK>iLQn%mM0pep86Hi9J-9$~!1|uivKitbQe*rTOvj&4e+|Y;M36Tml_;dOd3qLTKb;RqE`tYPrSX{!J{4OV zpAqpk=AyMv=EtxQC zLB5FQL)o^{IM6xbK!*ykDQ~Opy;#q{M+{b|w?#Xi9okdmOrmXGFpLjwe5?Mx|M~YU z{^M^unoGHtmd4KPH6Lr(;CSE_G1=q6S^EIjeH)YhfA401U5$P}XZ9bBV_f|8*UCBK zm=5A^$Mm#<@A)xW&Fja#ZJcpW=srlGz|L>ui+^hMcRDB6ab13Moqze6=(nP;K69LI z+$M5#xzD+_kOl_Z_4StX-p;9hJ`*I*ZOJ`00T?Zx}OR5@$AOxcDVoi|9IB? 
z5_i4Z`<@M2b7xL6vE%rf!d@dKa%A3FlmwH<`N5WKV!ZOk*L$!zc(V40tg)ZLT`Ukc zyg4kY{Nf7cQz0jy;`wavr1oZfSIpUyI220|e{2EIW=x$m1^t=wEA-R^5)ubz;S+%z zv4{v;Q5*OF$GUaxb`7GQQn}t1iuoMeRpw#6l`W9>rq}tk!F4vid2jh5zV5v&I)nY2 z@Ai$cDLk|ZU7yOrbGomwPnJivKRAZiKS$Q&(2bd4;)rMtz8R9`Jl!wz*ft(N8Ay++ ze~}0C!t8KZtO*G3=O{QZxE~nHh%6J_$72pT_StTI3XQkJ{?h2fpTILdtOgXeOHn)CYWOK$RS4#;&s{F*N( z`=#^b^*sjr{V}3CFZ0@nG2Q1dueDv+f6FqoWl5Ghq0a`SvSG5NkQGA;m-F-{w%(A@`a+16FbclAjYVm$ReR4-*w$ocpf2!vu zmy}V?bpM2^q#xJ^`c|to9oXtI#N;fgAwR6mw56^bkq%SX#Mny@d$ZZby#edX2W0Jd ztyqu8yTxNuz!qVoJZL+4;c(MiL7oKax20U@{Mua}5>3^t=07UkI3mp{j<>m1-H<|(%T z+Vus4?Aky5XD_C%P8g@ZG?EZ!u)XRPfA_J~b>KanMURdc-KSDc1ZwMLL)|aRc%Keh z8AUckB$G}IOQ}SvcC5g&AFf zT^o|E{_N)z^Fx|1dNe}tOw>tmwVvk)HJOgs@BLa+(7JDESI-fdB5qikz6UMISo`9t zx$%XXoTrB<3kyzE%e;=uPiUsGfi4s6vkf=>SQ@A>0%B^tcb$)if1myBMpSbowRY;2 z$Jt@hHQrWnte1SOQKKI<-ehjC-4S9!h&WjLb**v`o52J<#&tfmRTxn*ZY)^~^y}%y zJ`yF-4+|di%dnvhVuMX8TA!kRP|y3mjT36bIm6Yz?D5Mt4(3%82Y%H~HMaX6Ic`JE ziyO6U${4}Ewa1xBf5a#v(#yNC<^cs|j|6w%9cxJ)Hu+UW9(*lKQt=Rki%!s+&g(%# zhO!85`U~+0zJ!)4y=eW-POA*>f0wAvXGgs2R>u+_4uO-%u{I*5chvcxUPtF?L`}&T z02WGbEjztQ-RpV&ldGn6pVD5-e)j{{`O~NBao)E)_Ezg+e;ohhnDYdC$@MvgZ$3Q{ zc1frbrC*8EtE|+Sjz_j+@uD_o9X~&QTg|JO%iKpE`eKIKuh!J~ieDC*cU3#Q9yY6a z)AP`0@#1TI8SMV}z2K!2Pz1Kc+B@;wA))Ka^-ES^|1(A5P`$30DZ7UC>*odT59jF_ zdvRkCC!1HVe=F!Y>Gi{ly69Zs!Aq>vAtKfHkskpS{vEuS-~$}L@gu#iPB*VfeCtE< zV=jBGc|R+Ua4aG!@~!@x5;>W5NJJ43^>Z$gX6055nLar0fm;6@BLk08#T^2Equ-Ag zC{)dAzHcbKYc-Kb(hQ)cYw5iMEg-=9xcGCdH^1Vle|h$kkF3829cPzxEoDqnKVG@} zH_W$a-Lt!OvvRR^s%?Y4I_#S)wo2@u2<|I~Z=7c4PqP>w*E8dC#VLt?>+ElW;>H@Fwv-TT<1 zQX*E4f9@Z)#a;z#Dn|iq9$Vubb50SV=;%xK0BSB9(Ea9YkT2WUAl+Vdg&!rxA%JXd=nq-SHTHp|IG2Jcj9 zU&(9U(z?OgD*6~QkPRG4-{FWUjgX@cq9B<-fAWCl1=h$5Gy&dqTm>xu)(5!p;C}FN zZn@WQ?tPz+g>NoP<;yU&(GKmb4@$(0+Jt4qd<{+opP1&v22Q7ElRG5mp}7pavwU}? z9A}&1k%9k!IqM2W#67c3#5i(-gV?SyXBHQAzVUVrrN}SO@(24QsO@@#JFKxP6S3T= zfA^RfgOvAGM~kNtD9LCGw@-gK7doBGcc*ABi&UK9BznZ-NR5 zAD#Ubx<66R&H7cV@6X=uS|j}C&FW_$e|Ua~Kut*Hf)r2Fds(_wUe!CS*CS5IA)kPK zC{ByXzL}wM;593l4vsG`HuQB!q+1D=KhJ=>r-o1OZuJ@+!-IH(MY2vX=C|wOD^2qm zD8_BTTES>c2-`;jaUne}>q*iN!AtDRm5WYi{9D7~roVR8W9`q>-__9cdvLDrf8x2W zT6X=J?0il9o%?8dSeo8)S!dV}=5P@F=zrI@x%-=%SG=c5=$vFDPHYmN%PjpGP3<>Y zk&6_tZ6tYgrL8#EwZ-Gp+GWLJQE;yP7FBCqqQ9+qEdgs{EhnQ*XcjS%x)y~RZLyzW zpZfBu$3qL{&r$a&!M?m(evC6Q?B?aBt<~Gp4(As`$9K5dt}37PpS$~^&4v1gI)8rF zbrIZWRX&@O#=~AfG_ejZd6ia_J(KI6kj=)sYaTadxh2xgT-3P|9}rwn>X3W-`=n}J z$cZDyQ}5!sw&cV=i??W}EYFeVp#QVnNDa^^0Gmlf(65_4820(16_ zkNe^}AFwZ`my>m>Bx)s;b#P%%(YE*`*2TBQ{q!PV?JRHhp#T@sAD0zsQgA|(V)`+b zL41xLDKJvZd2CyngYeFi_J8Pv=d&#zPrD0D!ffOi6`$DB`y{ljSW*Tb<1)VHIqtB3 zOQlfKQRn|>^=B*@ zOj1?7d++Z4&UraCYK&kc8(CVqEv-3cVpCPw5Q!;Ks=-@+jhwHdd=aN0@@D#Q@qb+B zgQf8s6LF1&{!={p&aLH}eQtRIt#B({qB7zh=oHtHozLOj0MZz=Bx0KDVmegB9lUr& z_Bt-gJt#h5>48TEAAf`U_y!A8j8hXQ|3%fEN6V2X8$Qf0IeUbSLcd7&BDi*4AC~Tg zQy*B?w;QZHYI1?C|5th34)W$* z2TJl5k9CciM_&(Z;ziwA`t&UiT)95nIBE0582hd4@wq?tpSvOhnNBcv&{6yqbYhb! 
z`quUwW`I1=V}FQ)2?IUr9LIMZT<1ed?ZeyM&`B9lwnL;vkW0f+f+IKNNh#E)B~)?m zeL6hgl0fQ8lK02YsiOzp2?$YYLY9jq$RsAxdoH#60O|WP(P}|arVD+XIwfP0A^%I-D+Za4Vt`#X3SY^@N#&5h0} zzrCNarR#prjhlDl=kNQHE@oRbKsfE{$gBSAb&A&6u=KT?$nQugUej2-D&V z_b6vQpdkqK5xDR*aFyne;aPDmcxR22F?#~iB7e3F3&g8+AuxEAuGE6+6=93H1i5&EAD;T$z*23KBE z#s4`5q8r@e^Fhxj<<|YZKy3w@>VJ-ZfR%QsKD>uzIb{$e)AM}tBrChKE&l=jVQ<#c zIDaR8V*Diun)rvCKI1KR_hj}O+i%yly=ifdE&k!BpW|D+y7GGC&7;$`B|6h0&{g;O zt6H+x-kM3gF88V8&r>?Uvr)#PFF(0KNeaetjKs&vbOGw2tK|atjAU4MA8vH}BBEcAaPyvO-+1pk}P~3*% z@ktP#F{wOpPIpm}Fpe0-bDap8BvfI{UQh}?c()z$s*%_8w;15@8Z{}Y(L-!;p9MI` zuVi?}O$^lUvF1OpHa~3E=q=~s8h>w{0LDshe&BO&d+U?D?v=;CVMV)g@|uihK1mo` zN$XoIdg$8DJyFl6SqxJOmCIG&u=8RYAdmY)LJ4S>DQ`Pv+YrzCL08OK0#Y%wdVG!_(M_to8`8-J=)IHrkO z7I--91bD_q)%0$SSZ-zd0pctO(Fuj@bGMY@xA@?Tp7-7UhX36BZTu(s6<3m7^@>|v z%0}m^)j6|%axvn$#W#FKgPFhLf&Ml=)Yp&E6w=rM)sryl`Euaek`E9l>DO`|5Pk@e zr&r(0Z~3~6U~F$G7*p9;R)1eEN%RH3LVN0OaAw4V27e-jKY{KhIpLmleh4e9M<0|FA=)3MNFaX+0|K8Gl6HOwoUXNqvlp z22%y98n8t;vn8LCMOg=`d}D}}PpY$sDluDF*S0C_vv>xHnCR#|IJfPC7$W$k-uK{) z5byX6yNLBFF0r8B{Yt^&5(jXb?|*VnUUdM9aCXyP7B`LK?!}*S)%?YAuKUYb#?_b5 z50%%m5tL(aSR|D7Wq)3fh!DPY>^#Zz$$e9b=+QKpX@}=Zge6SF!kW-1 zAt*L*t8+YCs3r;fmQQN*Gr1J>U`(vGA5B2eiew&VSA5n5e*33>ugD0A$ zpp=|CR;uIw)Ao9YF)rx&^(<{eAiHTc{WeG3xDhYdqgn7fuYWAY33W}KH##L(tlZ`& zhS8ioA!ekF!@r(OTsc?a9kE0ecRRz~OAn@~(O|LBwhbkf5?$ar@q-Phw z|Gdp08~M~|AuavMPDz@tuNX#`oj;F4jTWGj*))E9a}YE7I_L}D{c6XpZZL-^v7cw6 z9kUQ?=jc3K$$x9xil-po6T@lk?9aFM)n5YR7Wh|-EAs#z8a|PvaH(*9a)V}86MKzZ zD>(jPeH(5jqZ5rCEF+CL&rLII+dUyPa_=Psg)FBcRB%L5Tw1u zxD(u$Gsh`8WT=mmkDugvqL4eL9OGFfB=&S}4cTIT$bUeI{kC7M1!BeEc|GG54bBN; z+G6kE&>YXMPHZ`oa!RIs6lOZZ+M0ywfzW&(+iE1KDkyckUHj zAEdWBGVALf+7jK%I>W5dk+VD8jOdL&_!nE#tx@CK8Q$=jnAU)b@=RwBhzvEq~uiqZjlm9}D6in)x#dXPQkFYN+8^|A3)C z8o8^$aD8XdR0-UhJsEysGcPAQC^LsW$a!g-`6!LT#fC!lm-H7mbmOuQ1cB_W#A}OCS&OUP1b}d$1S`IN-E5&Hx zI=;3iv-<&|orl;@_DjU43LQ+wU|Fk$6)mW)?2~ndq~VDVHF>~u>K6sb)zSaHXTR|V zqrdBS4E{`(A&<+4`A)fflb%aH7=Lqxv0zGh^T&Iay7oFwychJrobH8RW-dhhwABNf zqn|bB58iy_@`dYi)#%D?aNqH?Z+QPNycyqk0dPHZ{)X$kaMxGNmX+(&ZjmqMvuT~# zD2B(?;3F@s7g5WX|V0>Mo&5p+CuS`0|zM#OBTb{jQ?Z<$vrIr)5%s zuaSft$n9M5m*4)TX@+`5=6nqIk_~6tHAW5nC5H1MXkt7rb(@UMcohG@ja=g3%=6!_ z>yUO`crxd<=LU0f(|d(yC6m(LZVN~SosfBQpUjifLdAiqHn}&2@ylwstq{1pS*H^? 
z-fhcu^2(;Y;p|*eCL_m}X@877SLXfHU_$cz=rMU>Y~QEUx?3dkc>pTr+TaNZ0Wo26 z`4rWe<($%Ra?XU$EP9G$UiKn8o-e2K6pC>^D3m>)&OaY)@1&AWxz@*>B6M_&#&Gxk zunIeY2M-ByKI&x75QqM*@sQu~i}dV3GND>dlJlITaq2iEAY?n|wto_YsI(K!?Bo9Mg9q1;1)Y8Y<)I=nlFX+hun1E3_Izty z(of_2PC1#@pSV9jetjat*&m)i`xavd-}w-sjg1P}1O#IeKR=CX3Fx%R9x1b_DtHY0PyVWa&K(ZB95d5-Iv^@a^>A|Va&tVHdDsSOwx$Ct&vLjIq- zhu7hT$jZIMsZ{W0(JA6tFLBx7OvX4LH)EuT4G-^iuZzC4T`*ON4MbTk!L5xRmGFiu zs1Y5!h=fW!58Zi%);7Ej51k|WzRLr2{PU=THVTa&@P83w%*6`hEB_o1KL2A3vnT%) zr+ld&={{m)HN5NuV% zFa%q3R)6|kw@Z6HqARC!WOsdqe!hhI21{d&{}_X>8{W59)bafUJQv%U{@KJ3-*i|C zArOv~OgVx6xi@xuAjgLXIX*yz{z#wI zTDb)2KSyWO@}*Es1ENig;T)d&cd3fcpPG{u2!FErdU(fv7)97`u4mK*h528`mP%7g=?U_LT$O0olE&gNg4N-w*5}jhPHW0`vQq zx5GX1D?Pn!AMp$s)F^({jt`O}Z=#Low*yEpwu2KLWpizh*R%8uW32H^c`@HE7dVXR z)PIL)Ko)kz&E8TPKX-!h!$a^X^8Z}UBd;8EkksZl^=IF3|c+quq&8Wf(v`UfP_JQ`~sK%pPAN*2@JT=FbpFi0qbqZgy+ zr>8baACR+yN=YW4^0{e;&O4lNdwA2)Mt{3!DlqML)itnBSwt>t6|;ssHL-XtCpYr< zDub!%w)!)s-xz+hCNA>a=&1W@%7}8lh~gZSqDNUW!D6+5$|b17CbLt%5HGe0IE@z) zRLMZE?hb>6**iR zTis-Ysj5uEFS71bB)$Vb}7^mpdM z#k1d@M*b=6gL|h_B%X=8Nk0y==|XwR=v341xR@~*-uS=EV`sSnL`@g}pJ}9_ygu>Vp zDH)OS%2Rp4M?Ll~d@N5GxAr&A1)BN!C+{{SxGx31#qsF(IHAqs17tK(DMP)+8xOn1 zVZ-UO{0px2a!aOR=bqiY5 z;&ghA3Vog?V)Sg|c#VEPw}^`U8Q!d6NDqQTNyhf0@G&|ReG4zc$1S-ZKSfIUg&YM@Fs6ARSTfI0yT+wu z&dD?!m+Q4d;pj4b;yUxVR$(;PDI`|u#awcOKYqRI;(q#TkL&q9^5&Yj{+T>C^?ZpL zy4p!B;s30jyZbS6Nq4VhR#5)_!d{W?zMS8q{9W zi5jT^efc}*o00HoJo*BO`!7jJ0lA5-p@UD-?8$KbHl|S7qJPy@XnCWaT79Iy8mIE$ zKd76(b1upMah?3-_!jT~SzTb9@t(Kip1REQHujmQjUoHN`^q=p`&ah!Ok|e+&{6vt z?_hY${-Xc>;7h|8=jM#p%;~ajF*JCFLT>#X4iy3`Lg@50NFj)CBqNybl&d9>iJmxWK>FyoIG46f; zIp?NtXHQnvn}F`arVH0`0lrF`>5&VM1JcvNVUZw)!xr&Va(wJ~`oBJumhjbL0_|%76VpzpF}~%y@9J@cD}D0|)Bl z?J`DuZ}$h}Em88{@c~2q6T|c!@0awhd_#X|*C*6<`-^e-C-+dEIL*1FIlH!QOoGI-dGPQH2rrd%8O%8q(4fijsI-5;$H&o(5=#Fr9kxEIQC=j#oh z!5$2+^@>8zUV};zTT3i$<^dZXXOHXCevCYV$IIUj{T*mpH(w@fW1$ODy_LTPaVj`>(~78=b_#h5M}RyYbKY{(oCJ zDc30;+s%mFGXjBq+q0mCv66OCo)i1dp1qn_rNbqzXiUkayh9a3S};JaZRAA1?q4L@ zcjohx+WE85wxg3@b)!DPNq=dlAAHx}*UDSBxaohOkHl@x+{i^gvC*AnS$@Hyn=zYd z@f4f}%(U$fuFQ5_(19YrabZai+kY>u*JVjG(dx~87`!uS2`&TIbNsvejXv6dJj6-j z&ops%Vvn|dBLB5E((#YH;!*M=CYLstN_<*}cTOX!%jNR4Cp3~?Fhi=Vizbr6Ml}6q zC%MrZ3ufY~MYREG{n`$D6DJD%hPhW6|Az0xwbS-He|9@Zs$}s$)AJ%cq<>l_{^opl zZ|XLPZ6s=p%QkkIK{mU_JNwiA9rvwe8Wo3kX-RlWk3MoMg9@PtiW$G?=?#0ccdwkV z&V%P~ghQkXOnRb2ctKw-@@xo>BiZy+EkH_o-rVAxZJGJbd4x(MypxLmM&rxuFZ-(5 zUj``7=l+VLFj@|U&2Ajo-+y(c-EZef;T7|oi~mlS_9XvHUE6El(eLltoCVlqCc3Y& z2$)n;BCC&N9kKZteOG%s+%8$y<{wlCyD`XMZb9xwkB-oTPY<$LP-G|2B4A3olekK_ z6ak@C4R2QE$rRI%B>zwKZ&UiH1D~Au{CwQK4J*&NGD#-tD?V_gy+QbK>Gh1G4f+D93_TlPda*VfuOg*Y) zZ*KYXB(9K$=-r zDs5SEo1Yrnkbic9^VDEmOOJ1mAGx)eL9CP?M#wta`Oi~2*zbM{-+lAkvVW}-*%1) z8V=;f*YvBt`qf7Ix48P>^w--s?kn$r8CQ@)ujTn@$A6_}4WVybCe7K|dQZU$ym#&J z6P_0(b@-$=4D^`m!1vA{hM9HRo`Tr#G-jJ zoTwy5V1M6?$^UpP>bwsyy6$2V^ASOOo}5!D(H>CC{T}90Q*l8A0%$vp~H z*N&`bH`j=7n~~x(B~gI(vPK&XlE8-|@LVz)Tl{7JuZ;#{CZ|{%$(z^nA08Siy zg?~XU|6RMoUuzp|e)97FB~bH=F`AL2w?)6+iQ9h5cg z9OTy&Ij}VP!o|Vcx-%v_j=fcj!+RRS%VYmd;hqDrdED@h*iKhcQaw}l*w*Pq&%;x; z_0-9Okemk{)cZmI6Jekmodn!N7`=pEq<;maTH!dN1H*Y?jW1fcmwnuWK>ryx`|0ez zwZo|0L;sPl_QxLXS8lG2+x|xS*P01`X}=?~nxWQX$|zI>it;_#1|b_gbF639c}V>8 zfc@4^`7*WNpLb09kN*)D%l>P}V0Rf4!V<9#mB9UCz_2a%AvrbsL4RDM zb?(lc2gOdvYv{IdjoJLC<|fwXMGNknjDO-b)~klsly}78rYQEcd zVL6g)Rz0}2!Lhg`7a!r*e##cxy+_;cD&*E~%H^b3t_a$4E$qn5u~(l8?lo!Z|?5!PUUDy7u`=+mkX-g82MUe+ICG$Y!mB(utn$1O(}+FqP`+L z!)9J9JnJXG#+?rM39Q6LpW>>CoQ=ctfVKVd^4(Qu`WueAU5{Pcr0M_P*6rsjFH=@r z+7`zG;;3K;%KAjSk4NW@J%7w5PL!5Zg-o*Gg+Q7F`N#(qNE@VFoU|awsJt??7?e0RGA5HT z#;UL)?N2H1P8hd|vIkRUg^tl5*;m2L^~Ru)dr|PXy1&jx54xF;$A3WFe|WBJ>gD2Z 
znP1i)^D9roH+%1f@rD0&mgL4~r~;!wn|uvl4`V7I1`elhaVyBvQ=A_)jF3Y&Kn@kq zJmk>18>#Exz`^rk_!^!Jr>z{`jpX3(Y0l5^OjUovNqC?W$1C*MQ7_L%C%x7m1S$_| zt1_I?2b{Zq`2C)ctbh1#o-hArb+ip!)cJBHe=#}-t#3Kt5VJw&1@Vvnyl(pHFV30g z;vd#YV?R&e-NC(rl_VyfD^SVoHV2wLtcmM1*qq<>k^iJ!?X-So?4RGTQ>G8rzv)?b znGy}Ir%rld(Q;k(A`(i8Hl4qn&Rcv({7qJGVJ zxg*-1(R0@Yjj?zZp^+ZCnkP2CB-+fwGjr?b?xI_9Gk@nj);*soN^%IjI`5N9Y+ORq zxBrEn_0!xU^SJt8QjLjn_w(J+F<1f!cPa3DtW&6y-P3%}pQV-rxw=17@}^RU$t>)A zcAXEfG1edA&M-FhtH|8cFUxA98&~{_E&ZYUh#0b7aK3U+jhIa?>(J(`9QOiGtqyyE z+JG&8hkwO4vc(6vvx`1^Z%>Xgp@I6(bk{dh)C;@Nc+I~ac7T+%=H3C#x7{3cG6h+6 zLyx|~^JxJ(yodixd)>g@ykeV}Yj5$o2ktSYC?uTw)M7*`Nq9+LpYDTLx8qI^$DK@( z;U$jrJDv}*2mNn260F;1-CkVIV(P^D5vOpHj(=_V!P6gh{-YgE(jn(x$Ez%9ISDUV zL(?|R8h;iNf%*jwiC$)ZJ$}5)EuKGVXFsk>o9Id~WfNE+j~%YrD@StC5BrBl(AY+= z)(pIDUSwvh`tU~2PW7yOWi;SJi)OCj>Xw#ZUyllAo4bC$f0sQX7pmd##kr8ceBZg? z{(qX-24(X|V>#5gR`&A?44N@ZPe!1)M$RZ)rwVeX_EJ1dbl`9}S@44KqgOrn6Yd!i zpXU?p*3Nk!aTrYC3n)aALR5>1Zg_Bx*p6on&zd~8JUGo*zZ2K}RXhH-IL+RbHw8Iu zg4UmVssEq6Do@`&-}Enk-G{X}UBk6-Hh=igIv^LE-R8UQWpG|6>^&{WDL`H|Hik!M z8A-p{tL3MaEG316l2hQ*QxY)5Z^^(XWc0e~)5%7U!rd~rkHact*^GpXhdM|5t{8q9J;X@uY#Fa1UW>D0nCWb;`JPyl^Q$~YY=&A?}5r}$A6h~ zQ4BVYU8BEZAv9_O3Q6*E2@==Psg3i@-Ru6c?{>_-88tOTrn@CEq|JHlYR>&>Z z@GZoaMd1_Yr;B!Gx!yA*aYVex$G-7vIs2<_;{zbC>~&nbVhb+oZ~Mai+4Rj@n_l)* zKlaVPe*WRlxNxVo=h?H1#%3K{aDM>HWd9Tpej2?6d9h-;_xesaAzk}u_7m>Ak zUO*wVv`H>V7$-Xd)pWTCw@Yb-$ke!hkRe++S`@-2G8r@A-A^@`SdmS*`@Ukbw?CW% zJ)iHN7b1&@98dqf#$#%Qh}Fo24O`UCu0nEOqQB!uyM(iZBJVW;H^KIC7JmxUP@jgx z)_@TCcsGx-hOhQRAzpn1P6@9+_-eDKD?h}gu9KVQm}#4v&&Y5@;n)@|{-Be2!%v$% zaL~W#nHtWdU$jkI{W`;cs}ek`P1-Y-tG1n9&WvBF3wKnBCr4%usBAV-S%<5{=KFl< zkTQu@$03t_exjnde?foUV1L8Oh$8lYMHt%>&?z*!#~$UIIN49IuZp*Mm+5ZjwHK9c z%wI1&xn!i4>$&D$R_-;%WirN5bNk}=Q*UIW`iKn#HS%7yu%~u{+K`J^HR7ZoiRAiZdrV71JDIeuBH3Hf!U*LR=ovO?>uK(>};!TZbF* zZ^u6S9Q+o8+_u{v=Bpn%wGA(4)88&W;kjp@AqrmIvq*RP>VGp_M=OP6_8|yszP8WS z$)}dx_yPqHg`Ld7n%11a!IM+gd06*KPLH@T`x<@R870Ag_O+UwfVN zzx%qohEtdZt$*J7#n*kY?E(L;36AUQi8;snJ}!OZ?f%Kb9T)Rz9mdr;cpScN1lr6JLXi^Mq3iiVOK{QIm|z6YpZ@sq!sKEwBOhxH@V za~wN_1F9R!fZC89JjEN1y!^0pkc0b-&a%oUic^ZdJbz_Nki7C|OZWFP3iN}UP*MOX zk+IZiz5pYu;(*Z$2B8K4T>oLjn?kgafDa6Q%7LEMO=$We#rfi zzNbEJ444$5R`)SG!SwA!L%!I@W5*RQSurcTz4+3bc|AFZyHPo%GjN~O_Ay>g-4A%9^}2FL)_xopo)_^$ zO1HK5oE_?VUwPZ|{f&Nnc{=!z@n_F_98|nGaerS}l|{R@zbucF`r+R1%CD;K^a8fm ziZL&c`m|uqTkx9I)3bLMoaMRR$YbcH-7#B~VV?EWX0;0Is$a}rzb0cCjLzHl$+~)Y zVjmxqo*H(}T1`2Oj^}P?`T}>(=6*aqWOwyuSWi!*&J(+26!n#nXIwn(vdv`+600 zzwVv_cYUxQ;OX=Jb-L}usk2slheJAh%s(Abru);z?@i82wk_|SMYJrUoz+`^Ej#w! 
z@WeF4y9Ns%u9NIZk4BFYrf{d=PyZf2S9gId4;V*moF@4H^6yR=+#;gf!^<_|^nZba zw?aJ@OTq4TJ)o(E3FtkzD)4}Cl1oT%QARXgE4stAN%Qu0fhY{nUBBnhiAS%pne*f8m6@O}A+Y+zu zu{=-M9?aBtAL~0ryc^4)ugD6bRc8ne>bR0e%Hxqg)pt8dqO4{@9R?S88NaR0Y-^S({`9NOn4zx*CF_JZXp z+xq$TK9~1%H^8!{zWSxP?thF*nsz!ae4<{WA9qIOAK!=A*`+;7>_2_%525CZ%kxcn zOU&<@@^^2)ujn0+8GNbB^Dggmr~vVoSe~73?!Ww=9ZDFGS<>8Zi$9g;_Ws}6=UaRD zZ91>o`!D5TxdMUh^TXbay@hxN!(I$Fx~)HI;qQHlYyJOrZOT7HMt>}{cgds#jqf)B zQ{O+!|E)c~wfD{bzS&Fii~S_}U)k$#_ArBRXzagpryF}XR1ip$4v8i+{2QCUvb~;z z;el?}A9r7Ofh5l~>i4=|oiUBe=KEj&O-n#+MO4-$n7-fBFEc`GS||^}3rM1L0x5pE z5tlUVivz2M-=3Q|OMiQb(*kA6(Shom6)e>B12Sa5pwlOHS^B(NjnZ5*kC^pQgYuU6 z*8p#9Of=b8fYSw0B*2^tMLv*p@F3|C6((KnJMf8`=F>HFK!q`%ia5)If)7N(^O8ET zSXTL4;VAJOVk3(wb%`j` zBBrh<%0ZFD?m}#Ju`Jw5>;}cUa7SXVicRf3i-T3XQ-7}#AEG!???F^aajJb+e5OUe z@G+*dtZ5Rs5o~Rmr0x^n?&+d-SLybPK7pI%YCdyoC3?{@c^b{PBL?FNZJj z=e^r6H!t$@9{QW;AgG>_0zcuzx<@^@d-oV|eWLy8TrUR;qUzuAFuC z)YHS~ZumCtt-cnc^|I65jTei@<*d7TT6mA?o0tsLkh1-yfBpaeZsV2)s#OJKzToqv zpad6nlO#E6vJIaXK3L8W_KOAC32~S&e4y^axu@98oNyz0VkKfngT2$hgH7ZM^wYAG zi+|4wcvYcdU(wT0=#7Yj++P9nDJd)Dl9zaFTrQ}iC-!zJd`?&r+Nm|qmp<$hTjiC!JTIm?F!zBr_XR{Q zDAx14m&R1nElE>SsfrTwJi3FG@S$g(!IsMDk>z4ut|}1Q3QjB?0wmi)13#L3#eWI{ zdHVD#a&PV*m!J8O0Hi|*WN$Jck`#;iwoK(rMRzMi`5&6NVP|Rr+0d*5kLzJRDFn5k%4-T?NvHobW->aes^M8uKyTCrN;J51t z?U%6BzdWOuH{Z{9npxo(-#_Y64ZGiMNBA%?{SHWhc8F~w9kMib*rNtRDy&#TGSR>u z5Y}kNQJ*K30&?$>0A%UXre5Vn;`3KLq4cPW zx>^a-CMnj}+^c)q#((yWk`I3csqb@Y$JJ*=%BqrqNq36+M2JaW`98OsbX`?4HR=60 z2_7_UJT!B!hJOXsezC{7RQo=Bc|-WZ@hdRnSEl~j&R~CKNIJIe?`kM`&m|EIsSM~>-ybo?eG`AOwES=iLyk{RB0rm0k=C^!g z+iu>wi@$TtA1(gHdklei!@qdXCLQdL%VvzrJ8>>fA%F0>SMbk@rIlb>MfG35KaL&g zOnp`?tNc70`70P;8(}^FbvVZH zdf|wbnMdQ0RvSeiN)l|>Nl|Mc&)PQgt;9KT+uY-v7!X>)C-Zs>&o2TcSnZkLtAD>o z(EG8lvVRS(EeMVsQiJ>eU)B|};EftAjRcW~+(P1e|GjDokZnm;bREN2+ zma2j!!+G=%Hl}cHzOG{k57m-RG)cmIiS-K+OQR!RD>J99fc~NGb5Y}SN;N4be;jX998a(uLvV0xhky8}Nj%Rhv#xOeWHa2|@n)Ul$PLbu zsT0Ws7GssdP4B-_Zs%!jsH!12IKd#O3?ty zk^Jyig!w)^c=fM&VP)2qKl5_mhoLvvti zvS*t$B;jH1m}_FzkALp>j;T{Kzc9U-kSZ6HIKq0tXXCt+FIMgQjeVcPq^OS741b7i z>Pz98Wa#1k!OVjxt_cTC`cacII93ejN#dIR5|eSWZpC#rpW6h}(|3PGynmDK<8MPG zXBErvI`DnC$FX)6T;+?RFrS=p+p&=1H1eE+5I2LymH%(p1& zGxDxB`6|o@keGa#?*{fJ)Ol|VL`nXr&xP~Ljd``^g-1h~!d|#N) z&G(xozrxRDliz&aHu=rxU6bE@elz6&!m%9B6yJ+OgmvnaSPq<@G2gp+FMoN-*StSz z^6BRN6O-?=d4JpFTQ%>0$%pGs7g_wSViAwTv=nN0vHymsZs9hH zTSh+LLz+hmUkMO-dd<6tjwe?;BAFKt6psmH1cehO&3yooy( z^w}1kN5xp4B}%(G8t<=>hGM135ylcJCcdydOHH4ub>=FKM zi0|vmKRvg?zkdmSzrc7V&S8e<*VKYUVL3BcOHyGuGes7r9u+xb9H%J{{=UTD=PM0 zO9_>!pUbtcn&(e}^kJOATEh|BB{k(%@T^D7kO@9_B^$f?mv*G^^bpVE z$sX%B)4x4uhUHl*Vqy7>ojD19e}FL_MWQ{G*#EZpKAHJz?(5>Ud9Sg+Yts%(+4R>~ zHs_Z!O@DOLZ-4vU;J4>e08c=$zm4DjrQMqMv1MbY|Ehcx@I881KhKx`kShFZ-m8!K z*H|AWUsEnANp!i-0=7rH*XJW0RFN_8HF9~6HAVz- zA{5`C(YSw-?;y^I2mAo>o_Ly95L?7o{0nA40?BKb1tbLCg;`2M%?B_mVW{~7%qlVk z{s^-HnQ9v#;Y2EwSeFP{bRfw{47Ll&9uC^}O)!D9Cy@~~>P zK-OS);VC6+Z4cn7BrnPy;w17a?K^lH5d|17D~Qr)tO?1LGe)B=q~k$a;UE%1Iv^YE zNLX@GDrbhBeJh;>EHfO$m$2NEopv%M z0Yv+BXV;`ZKR(ak95CUooo%&r=9^WoTU>v?kGQEaadv`OFa zI@Fi+Sx~p>=QjQPEq!oJ4;gpUC%0bp*VS^dwCK*Rv)yAq+il;T$MSmby_DB`xAc1N zBuOsmHfhV2@bi`e9M~{HVP@;_dxBYU?j) z-#?mN(}fPMzrO*CI`hwL9NlNVxpm1~2Uk1Uh*Nld)`s`knAEF2`O4m+!>V_muV9|9 zcI#@{K2Mz5XKdQvs{`Gp-+p70wCR7L+FH-fyk0JrU)iqx{v68n`FYVjtemTDi#o8{ zvRSwHkZpR%(l))dvrS~T>8*{6?(P2Mc7JPUqT9CPCh527Nt@ogrKh*_jcU`|cHS&m zn_jl>*K6Bxlj%17e6>T-4{qt_TY6jme%l8(>)Z2h_0w4SgU`>Vk% zy=>EO^_SB&{Z@bXZ9m;?Nt>Ru?S3EK((PON^?vu(&r9Cgjhv}Ay{*5}Zl5pP`gckD z{L;2>>Fx7rTYsf!>rZZEn;y2$7w!As#sT*>4wNn{Z@NF zyWJnMTYCHZtZkn%Y}+Sl+sA+9ZTplr`;J)q{=Ds*rR?3(vo?KlOK-~;@wR;YR=!*R 
zh}!mb?OXdKZTm#CTl($&lff;$Y|~r&Dd(;IM7Q>{+x9z)>-(L9%{4t@6Ub-L2GBF1 zgXZ)3+vob*XRG<#JTIJO*YbA5vv8NYZMAadWx9JDkW%5e*AMjC%65O-eA%C0`(9VJ zzdgq}qDk*X$2PsZw%weyzqNtu{MirMu_LRm>67I8?k)+hb6m;$*LRn*wS7&Wur_^g z9b4o~^>68_O~3XHIm=0#ew_nKKDf>ya#nWR^lRId_WwiJd+orkBU)m=V$9QAXbk0a zFAP|rlyjC|4CS1&4F7+-u}^oO9>4>*o`hN;#S&N~MXvC7-?x1k`4?_?`QP}z`i||( z_}}=y{u{l(|GT{Y@AChe!}@>CVS}La%s+77zx99iO~+&If7JgMj{lAS^uKZEZ=4nW zjmQ7OAN_CqpJT@#-1|3f{C)o)_HVrZ8~;b&{DTV|^RM-P^@o2iN&O%H@xN8`Hm1ja z|8K{3mdCr4|Ls5ipZ~L(r{jOG(!Bh?|9LgG!~36V>Xp&|#ic`AZ|Tys`G5U|?#DZn z|2h4sX86Z9ciGm>@t-6F{nx(b|9Ni1e=pJ_{m(xjc+XXZ{8yIlC4v2q0-{B|{s+bN z&zstxEvP>q{ZD^>6uTb(f0R9TWEh93cP_d0KUT5(=PtbcLzvd)t3R7re}bpuzGRVF zuiZTRy^G+?p~1Wim*80qLMLbRv5YqW5;*=`{?hK}gvb!HcW7L#0SCy>A};K!O`}-{ zv_mHOJyG!xs0i#iCH)#mT65rYtJZDr`LR1i>5FO0a1wu4yY4kjUB+h4{&_LvuAxxV zo3ZIn0pufoX`trs72$&_Sc2wYfkk43Zzl7po&(I19?Y%~$D3ZrEm&-FGkzvztM`Fm zB9pe1_*2Iv1wx8hr?MT) zae~aCh;4raC6B|3Qv`RtPJB!}anK51Ex4-4fWo+_YiRXm2hJ3FipY?* zEc2yLbK;zDrA~m7Di8!hbOXr6(1^1hC0ll0w!`dv+$l&kffI0f=ykfYV>1K%IrGN^ z=bln$GUyWGjUKOo#cgP>qYTtV1FrN-0$uD-&0K#C&5((qILq%=q7vE?1+L=!;E;U= znsuIPeVm%LI_T|)ywurFf3@YL!-dW2Re7Rv#30g`ojRM(*%9IA9926xF+SAb>Sja=v##QJ3&f6gl^h@0aVFx z{Y3|Q*bRT%HTSa{8PTF9r}*WE4chArCo^}KPxFWd{rZXt zn+29(Xt$%C}Z)$k?@bi4+WGP?j$t=BJ5cGYG^vI<_YU zA7UvsPPqEb(g3qA2BsE3AsBzsud}B~Vt@4+y7kb8-(6(pofx+K(?zJqSk%*HoVi8T zH<&kIZQXudks+0i@t00!{b#|aI-l>K_MYT42EViEQmM<}aq;_P1U1qLcr&ZwP`__) ztF>BLaR}()*;LZev1@Ee1H*0R1`GJKnVp~SsmtQ%HVn@W>;^8;BN|g!f z7Kdcj*}v`w?4iyrw=Kr&yrwZeRa};>z0Q^FYN<)03s6cTHRe2n7NRI<%q*;tmB3_6v1rUuY2c_fhP_6OlF|(J+x&@Ay=+b08@5dI_0^z} zDU~@>7LU(1KhF#*?}6^8Tg5&cV`YXWclrB4SbwDI}lu#(GG>S%us*$~elTo)$s6EpI3 zpRUo?vM1*G+Q>8Q3AQ{|Jd64_`1{jn>cOnvU@aM=JrJhrwLj^_p=IXVinH@!?-X2% zqi)eeyZ!)Zg=gO|nrBy!da`A7UP{aO`XVDuTI20ZLIGSH?LBB=Y_)zFKfKQ;d>toJ zBN_V&-1d_Z7_om_5V@jF%1ksT5z~`A8f8pJz{K0ee-%#}&f3sjA00aXHJYTzE@2z# zm^Dcq#LC3i<_b$eptb|7^%&6U-*RRL;?fneAzJ(~t^psDq~g$bjEI=!btGQ~6U})p zIuUM-AKdla(cFG`78*dq`%Rdt80AoTC0d)fk<3UN{0e`gD@rVApQBQ%@8$1n4S8}8 zP4BXI=-YR?LWeW8a}3 z*tvMq8o?wkN2&T8DH*8VHJkcEoJV#M;qgrUQmx45U5}acexIjqwoVf^1@FTiT~|8f zjY$=2{Oo@+1#mUt8Ihg)*ii8qg6<)75;}e)jXWx&Uf0Vyzo}Vd%04$1NcakTSav%tnHBTYq~|Gp1Z(+82KZ!q1v&`)0kJOx|EtNOr6$pJ&Zl z-}o8j*Ok~O3=7u}b{YkKe4HR6PTSse%QENf<>P?@>wIlAehP{yNbFNy15o7ga$B8_ zcl4&^!r*WvuC3?GUjuW2+Xx9V>Z=t9)lCe|PI_dwExVe8IMc07kUhtphWuRqWgjyw zfha$;ThAu@SZKE?dayVR_s)bdx`VSLLN`3hH3CmN5Ae~}gFyhw6DL{Q~ zes!k0&PN(!(};CzTcrt!^8O7}s_!VNQ{nCfzwYsl^|KmEe6iB$vfb5ws4GLX&vLaw zqLsvswiFlv?ok@R!Li<^9wRoEH2j)&P%(d}+Mho{F$Wb!oDE=Dvkx+V`+L}DxAV7J z9E*CJ44K>Uw6Wd~*x*Xig7&k|;Svc~7wYGnEerw1ee=m=cN(X$fIx-vJBBIO&HV%U z7}D$Dg_~>#HL^%EXF0<*-e!Y_E)u*&W&T;uMbPv1#454vKR#WV4t!xFX)~ETysUp_ z-uEc#+%N{jROWtqEoO1fA;ZoD^YLe(_q->FRs>`ck=nr}WQZ;AkhK0(;zC7`D@TA{6FX8vtAtyTQk z#QHS-Vco|}-->YQA~EQ`Sxq9P^$vf!&J6TCLmbfDzd7J}!-y|n( zdD=K+{oPEk-G$xlYoBNQ6rN!q1ODi-xcfq%i3g#n7PpB1lHw%EHBY_76|aAeil=Zv z7Z*05y0QYKKUXu=2O;apn1wlGE;jpn?2Bt%XD8L**yQHkBJ_4pWL*dwY<#?G!+6-s zGa8MX((TQ&@{qPw6FIs3VKSI;C`E?#Gwg31a6C}c?Sq~0Tj@1}&;S9OM9U|SshrRR zi8BcLQg^)-ofRRUrx#<~vnGFIZ}dH>1YEG(q{wrj`MJ1TfeApE1)<&r5~Kd(Lgr0i zyDrzx`r=O!W(4T9&#im89c976GQAy`r|2-0DLz;9Ebd`JWF)l;=`a{)(`9FrI&O4r zsU$-(vfYgsXln6l5|S2G1Pzn)6TOY&qr^%ALs+KBEv(K`WLG*d_JV((hP?`;ZjzX5 z{N5wuPI#D}E^~R64ax^3cGFqdvR(CPAhY?$xj|Y^&%Dk?H=(+LIFZ07C`6wZry@&` zMm!xH=g7MKMtX423UwW+qAu59uxphB(uvMAxkDy z%hE-LBXZ}wQlS^}nJa%^owX5bcNXM*n|{qPhy7y&mN9G`hQ*5wAv4ChZBE6TNC&kH z_a6o#ee)cE9%~}9HbwdqQ}c}u?Lrw{CS$^gCe7gGm5QVd=oy2~Qr)oxvD^iLC=kfW zkarv@;ak<44RB8QDf8OH-t+7R;j*_ zm5up!8|IF*>%o6>;hlEiFFV_(tphB}(TA_Wwlb{EAn3>JeO}SngK2^O;~4;2YS=Td zc_PtkWo2^92g-*1yAW0CWs?c5)#BadD5dNbvR2|V9Iq 
zD3@aWGpKfUSqcJ6*bMsp&Jg9KogBfr{uT68x;T~RSdD)YP9MthNf}g*lnEqk6BT|E zjLo~=lx3sPTZmouL_cds&)6~V(r!6eeRh-Zj07GK7I6p1Hk{sDR5DWW6m{A6Ky^Q0 zc!CgDFF>zQ{{q#Ce8;%o%cUctbNqce;N{vJMj}VvpbL9h&sR$CG{ep3^G%`O_yGi| z1h1x8Kk|RPy1c3}s=cIsP5rDDVo%w-T|~-uHIWpnNH+LBEcbrBNG=5m&Etw@2SX0p z2MaKK$Z$A#PCJQ7&tPJGuhd#R8qU+KueODrOwIx{N5r9++mSdJBVuT7-3VI?CY!0L zY{x|sNQz;g+Af~;j_gm=zUTv?zCdFLVe?f1(=UHf?7OhQZPJ2;S__}(LDd^mL?aq8 zV9e9JF1hCZEHMc9acugT%$I`FK8;OxSv258bU^1W-mA*_5k8)t;DdGGpGo`Z9%8rS zo^dB7R|zm#8yV<(7UubNC;TKufA#U=zuZTEzWPLKCr@0iI^p?~cj@BC>iiuJCs2Pz zuIzuPQ7SIJszRho%CYQc=D)!Y5(zyG!gB>x!)9%651JtQ+81g;cG!*8Xd%T9S0xkI zA+obE$Q2VQD&AVA<|(e#z)hoT(7%hF{^jwYMN?kQ*&r#dT75;GsDPitMO{WX>B^Qo z18uoRdJbJ1ilf(AO<*ok%*?$!>P&77Sk-?eC0r0`J9EysR`E?&h(xNpzHqGWV>ssz zCjge8NX)mN^oH#&3l{4%nv$kI7-)ZIfTAd4B;Qv>)ciu18ntUEamo8`njgv$uK}}& zqhSmPgBOEOh{oCkN&Eaoql$6FU7~4p?_ULyoPKbaTHRSzTm-Td7*mh@UIk#-=7TcN%8e*eP>H@K1 zKM=a$T~7TZ)H7|A=p?cch)$!4Z3gnCS!{*ogHUP!N~C1Sb{zw-HeIMZhP)1_N=e(7 z#qF7+*hLJmf5{VJ#|}Ikd~@2OBeH+XN}`N<9>d$v#3mG3a@{X6RB!!7(6qMEJ*jN< zgL-`W;?R$2;tce87(H?z#X$h`Vm9+fq=G^!xR(ZD=O>X_IUK`-&sDctFO@FkgiX*Z z?7bY;u80EAi2!0(o~xm{Ld4L3Jg7{GgqoRs7gl(=@l{Kz`P#Pq5UM|8Ct-hm!)-qQ zB8n8{jBIt@x#F>(O@phQ@9YZ)C2%?i^wGfJ2eJ$XO_u+z0E#Pi0*z2Q2Hfhw@kPUE z>5v))r#T1g2m+|+ zd1de2If(0^0hE>D_c&NPhL!}#+GThKA*Ut%h{S>w$B_@LGDDYI&O-?98yG$n4%}fM z-Vg9ukVExN5Jf%bIziEwJ1HJ`m2iho%mR8}r94YGhXcuRQ1iEdvUMv3%Xa|VqX9Nx z`%~bRYk!9>uU-YbfP8;^?H<*e5lU_En|8UH*Bp|aZrRo;zs&S`VzV!o!B?;ioDJDe zz&xN#e#4Q(7X%>M%k3G5`3d1?A?P-^poI{65iC2}FRe5x&=@d%3&So4Tmy-m#gIIp zm=wslDKw+X9H;7Hbi}OqkT>fto6PIcQX@n%JbJ)dNHv80T77>^T*i;mFlw!U@2V_SXqi$GHjxGZTI%P( zr8i;?@>rJhRY`xx{*kbiI!?)jQWs$d3MlM=fD-r70y30e;E8-4_b7U9}H%&36qT*_+3+k)f446}-4IZJ) z9ST}HfM|3EqEQUFlj30$UD1Gg#`;UJF9fgO-c%qCJ=QSzcICBT7QX9|am}fE6EHaJ zuF89@sV{#eU9!RoKrt`8>(w#M8R1iCIs+m!b4jDBD}xrkFhzq-qE7D`KsY#MUW`H- zdOw30UtrWHaRUS2+YUcu$_V^~aF=kuJ+Xvz1D45(vf(+veqLhW3Skd+&25iCj}`}j zPKLBdU40LI+9yvz%58eZY4p$Onc-$nMos8G^`R*vbx^n2{9FTY~`DjY9*BpSV~1V?$kH zXXj^3Rq;AWtS9eOz>vEPJWSpahjL!2#4xUP)rCv6%Q`%el;HZhGEw?FRPvh9vNq?M zy1jphqr_08w5H3x!*d1E`UaT&d@W)W{3NyoqA29eAuJwXmFb%T!ciB2 zq1BiU$ODo-04vwZ4{@QJafX-j;5(0gRm^|YC~Cu&Lo2V}G$EObfhj9C=)Qy=gcu5LYCD%mQzWXE1F^6j}&U#eu1JsfV3d8SyhU& zr0Q?6w_Z{o`WG8NoDjwH^Y%ut4*`EV$5GTwKSRzKV!^nE9lH!~GinxKs_WB9gUXb< z#+mA=#-0!eBZvsc3sFmfDyj`jY*apkl`cITkM`4@sj*F`*Vx>-Pw-^Z_muil&wzgq z@Bdi2EXfCXyMP`Qh@MR`|E*i8ia%1UzNfhKUlFfe3C=&!_@lW>wcP_uv6zYAG+ zZLfi_-CFHotPr7&vpW6Vr;;LXv65z55)3Q7OWr=2$F*ALvXrC%exL1jmCmP;H=e>1 zvKp1l6MI;R~lhiuv}eD$7r&8uCn-p zRuIG~@L#V-fToO9^$v&KYPEl5ja6xRGHEFlAm^mi(lL@u9#$n+xyHuBFdTNdug!~G zga@g}p+3w-S3_1)LUhnJRBR!Mz95z`aX1fkQGA_mpyaW7$fkGLJCRpZdNX|`76@^f zr9QV(N{#rP_EnlkDSA+=FojA=ePs2^It(-Yncb}$O_TG+_=0PtH-UdKJ^-3M`Y`$- zGvj>bC3RDcxu_YEv`PPLKNX;Zhby`|Gz&=jaJ|UE=ZlR;VdIxI(-uNF+=n*oW|8w? 
z#7@Bv&Z2a=gK)y>jmo2X|H+UQh*#;3-WXg$BZ@m>c_g-u!?YgCrFb*t6rgLLWaEV;x zNuHaYZ3{u=he0m(yuFtrff7gs=D=V)CuYdQW8>)B`T#4;l?{J;kzWaqf=@4~2Kur7 z?ZaJB<(+3ewQhTxP8T43zJ-y(QPRz;Z8EYgr59uG&zs_tx_!LbyEF6#hn5>M#cR_F z2Rbp+_eGd6hUi41nko|c6+%@~cA$YYqUuFP+%AeMVa6|GPZed?8(qn+x*&Z~qE?Mi z{E2VOQkB4)vd@3~r1snI8A61ay2HUa-F_vbtV2gHuodJbdVU~JeMw=c?&ti5h#XQN zJarQdE&V}b<;~9qX8`>!W5KU65+h~#XgVbkd+DiAIvrJ1UUIR4c8Ik2RXw5>?t`3p z^XiH64Qb-7+Vi^opjz@uIv;iPJ-9bk@M1fDtU?x$U$}n*!&Ge*G!%wIfy}Ghdl!d6 z7r+FEqLtS4Z<3Fpj2GE7-2zuv`nYv#{xuXo@Gp}PI&k|kGKmCtA;kn^7h@#Vz7Y^d zoMq{DzcV}8^^AUWeLX;HN35{Fx6^}+E&n7m=)mm*)~N)d8Yn*2E1Z}i_5QKeI_hj7 zF5^0eFob{V*BYL4V#!#y?p^C-wjH8B6S0aito9rmI1^+3O+WgJ7WCi#gf3wae9sW$Pdu@w{ZH<+p_r7=2sh=Qa3xmP z|6Mj8!@M>TW}|Qig3ab~f|%vrzU;{(64r zUw7&Ll9xrO1u}ngWpwD?z8F9N=!=-nH5q1AbnzQ35i!0O6dOA{< zmDOZ+Oo@WPyo&&<6+VrEV&!C%NV_XB(t=gOXcpvLmx!p!yXh8_q)+li^3G5^fk-^0 zpHF1k=6C0~WX*YEZ)VEiOxI#B!u~pnf&a|473Y}{uAhIu!cN6+DReARnFPZ+5E}d9 zc#c@RH;AGBus^@24k0My?(4cV5i<8dRjxBt?Uhi_9;5AsqFXF`Z+b1|$J#vf7+y7M zK=0=Iz5fId+5)gwWDzBv?pCdf`(x}kQbis{GW#IKoAf_+j*^e_I~7BHtCOsKNs31a zG8n~cZY_T{0HqkPwy3KJbhSbPnUx_2Qu!&u?C0H^=lAiyqFU$t*F1sSH&NO+Gac>? zBNQInwziXVbtfqGicJ+0J>xx0na5x4!Zd`6PljfO6NakLt5wlR$B1{7UpHl)bN}eE z(IRC5s05WxQECUxP^ma%ElteQfYg%K^0p-XpsjzY@<(`y4I|YIQ4N3xb$iWfU7G89 zWygV@jeHh9r6zUw)UL@;8D~sn)k|lhfk1E;jl)&#-E|IyS%&j_2LJ#8_+ej2<##kW zLC~2jy+`(Ge7n9?snI&X*QYp;zS3oTV^H)bRO#-Y!!b94s+fF|Y%_LPeYaYb1gorv zIuL);#kj%P@H~VeqY+s|p8`ZJ+#Ic)t|Z2n3dj&WqJ__rk3mz8%0_hUNML|3NZW^z z+Xo{e3O-nBi>5iI4j!VaBG7UV1^t;NH~@XxF{UlFW=s3}?F4bwL#3?>+lNoWFDVi; zv_?f5#nRi$ze%^<*=4-l$uUMT^n+E>FQtF-pSKmjAddpknv(Sr>%~)rl_>Ctb1_7g}mRR;x}ZiH2OF=M6x|8#D?;_ zOAn(ThHb&({4l(PNyTPM>QBU?rA!xcu`(^JwP)q6=I!6%C+3y4NNS#~RDQh4j@zAI#k0|K5MhHnbcd z;>qYZCLX5x^5P$(`(QIhqBec-i!UvQKQkZ@3?p*2GD@Nt=FUrUzdk05#twK2IiAFe z5cFn1TQaDjRZ0n)9fK;G3vmdm-yP&hic%&&$n;A-fp~tD-Fd+>{8Ztv;nm`U#zarAZh9lWTsWPQ78=k-Vgm z>j0>Tb#@#!S1Y6kNAfvo1QrboJgl4nnM4xnKWV&j-df&lazAIT11EogLcVr(2oI2q zh@u!#9|a^-*L`Dah>WQ&1+dJ5A`M0uPug-$7oNvS1NAl^X9j7TrfBlyfz02jYu_zh zU^@@@>x{`%%C0t*31)6ASuJ9PpjG=BlOgk-bw;3gPm3{-CU^S8279<$Y{{t*k-MUv z(yZdmxo5VVR(SXKp9Fv4$&nk%bdeJP6q1UuuJJM4Y9!5MNptL;sFH{p!w)ZY@N3XOKydQfIl8lDYfHJrwS*O~iq z&h+d2Y1iql+l2k`14VSRGo@SBdErBv?gU9A8v&kB=;;&he3O5O#X=0gDa+14k{?hF zkTUNb5M!X^)!(2?q6JxaP3-WSd}46oE5g zd)_uXnsA=+%}q~(0G0bcmt+@fWs?*jXlWwU`Dn)#uonT&exg$_q~gYCIpr>kFjDTD zO`%56Ouabk(-(iw^zbe{PHQGp+d!)np+P}UJAU@0(p|UW(xIu2PSCqjqebg3V!Jg> z2qHO8^Q5BDh`5F@#nOoq>_ppC-c{t7@;FQ`>s59b8dtfRwJso#vxq#S@Z92`Rpf5D z;Q>1?#=C_Z!2_s3V1S@VI;#4?a+DTynoehau}Qko&Dno6I?o}T?G+2>-05Ij6G_p9 z&?Y{5!m5$}YU4V2GycRPD&6WEE~HkpSuu(iyV_@9#0^Oh)M7&spr_^n*N z7xlAFm6?Bi2MjxjRe})`d)0;g5+Nk7s(siTL-qj75oDMD@Z<=nv048fcJ>zr+^@Py zeC?X~JZhO=d|v;7cV>>q?z4WZrZZ=}QYUMx=Pcz`E#=n8LkG32!y8y}{sJ!gb$7q( za_NUq^a~5iSSajGIv%1MaPkt{8LL-O+U;;C0Vsc`>BGaJlJK@RVHo*dEPt2h&7m^z zBF_8ekh>J=51{c}bEP@C_k{={1Tx0>1{Hlw(gy%b|K^6;Wh`QEvNLU2(f0CYR98&{ zb>K$6>Vey{b*1wp#>`pqn{{Ec8#b-918&=+IUcYwS(Cyn+F$F1e_n>5>H7SOl{4jUY~Jr2qi}{Vx;NC|rMV+Dwr}~e*1sotCf8F` zHfA45(7h^pw8=(kgE0p|#2BC4ff+_3F9QcuxIR*|`08J5(<5&!I`SL^jK%&~}KZ=(&vIXFO@!*lmVJ>71dHuu|BO_KX zh7F_cEKS@RoPzS?2nD!}DC^`+C#J;W`2J0M{eU8SdLq{#`--@6a*_`iUCxSh<90~; zmu=QmMC8z;+X+Z{YWb2iq5KeyHQHg3TRHC3zhZW{3`7zc3ZKY|kUcJn6!#2USX~}<2Cwr!IW?1!%?c?*Z4Hu}KzGb8%3pr7Yx3a?iRD|QHKPYBra|gkz6G+1x zGBsMZikS#YJ^L;}uyc|(t5Iq%4i>;;CEeX}iK+w!7!zSG?XH;Pt=G`foKe%wMX-OnuapoyOK z-t%CqO(Gpr-Smb;!U1GIT>E(Y1kApPFQRKMXg0!jVi?{%jf>Oj5u~@E0w`)|XZc=# z;UGIpqlt5h^2*0)E>X!Xzo(pCN+VECl4XW@QX-XL9FAJDBLsZRQ;Cw8n-hE?#^Tq?jR`h7#Mm`-`qaEWY9F0h?{edO-xdim= zq^@~2qnf>;#fZ1{lvL=A1_FWb?NjsXv0 zyRzXS8Y`=kgNaCr^oSmnBf*IZ$><$!_1_JzP>J&5;o8LOA{BrF>d;IYI0e6? 
zhhF(UUP>-W1BZ5y+K=hkBjmMzdT++~3{v#3nkU@s&@PEK(Qw*o_kaURthgoF>8T|A z9136gCNjs!vb8a4>JxYL>_PuYu2cX}sfI4P4hHOk8~<~D_e__Pn@a8@+l(W|F64{r z3?M?RFobqCZt5y*<4)ZKzfqzsF%q8bEWOtNq(7$Fm& zR=i3uCzv|9L#$?Ie&aR}c{&V^P$h-U{Fl{h)EYBb_rYKwgKz6Q&i-2vurNLs zBUf;}i;7L81G}j*FScNRN~fQ|(0C%Plb)wqA9}r`8rM`fH=C_xE|L-)TS!rce4Xi> zH=7NseRDhXXP?e<&ObdvnOMiTWE@PQKT7YIDF<(Uz!d+O8?R9Z2YYogTsri9u`_?y ztmJPjHKL5&wX_N(y`>K%Qj0zE0q3fMF>?~lwM^dZpVJjl1b zZZ2vVqkKEu$w^m#opEebw|0Q!hY!E?lUa!ku>`BhpFn^2TZt!=FW%iDt&49{VpqfF zMO}@RUz%b}or=}{OvS#X0X9_tO$eVsUkYVuulQ)KQj)@T{Gl24QR;~mBe~g5L~%^t`Fp(LTA z6Nv!i7*q*=fZr|ro>>j!ltC>71$eT5kj!TosY_#o?PHO%leej3QD&HqS?+?-6g9%i z-yes?!ePRHoW1-juJKoM+8^7d+?GP7r?{6fWCqZGUhUFs@Dt8DG1#4wEW6w}ewGn? zsJUEMt$TI2=hiOfetCV@-+#s({W;G8(tCaa%CanfpW*MnGUxsmkxk$L!nfeshWyX- z7Vh8J{KvNKufq?|{%4{cV2tF7`ZaM%p*7BGqVQb zwyrU}&nJIu;2xV%?Vx42m|rV!X0VyDZhd7~n_q7hV`z*K{$viZJ=ZYI$m|zL*IL7{ z{jaxwnS}X%jLB#y!`b{_Hh+KG^Y`=S5r75xpB|>P1uhqM?o-b=0moR3LcHueGo`U~ zu_%WLz45ocF}Gn03){zK%Rtzzi#8OGY|GcN%rR`8iu5h(8I8U6PhiGzSL@s4nO*0S zIRb_HaXuE)-N%kaYwO0J_I^Q7c8I*ybrp%akn-P~P zL_^v`+6Y=j^fiCOs%})&UQyWGP0rK{l3?*cL zsD#ka*yt4lEl;`4d!}p>JmVG0K>!NDREzpL91@k0{cyygh7%xyUuISI?itf@#f29$ zX(GrX@(p%{l@xc`{t88?oNUmnl=_y-2bdd|LvviZxI8=suQbRLW~m^a(AbmB*-k0t zhJrta8M=`+)HLh92_x2N$-U>#IAYj;Bc~hR!pP1&WadGH(l}V4NQg>hdYH?h`>K2T ze|dY;UiE!uLGZiGyV22z0AahWk*4f8iL-&TL3lHQ0L}&h0yv>H-+kn8&TqN@=RQ5H zmgY!D#BtfNYp;r9i^XVl-iPzac?RBdztwNWzHN)@@Ma8IlTwMp?eg{%%XE8xYMSH0 zzOFLZr;Mr3<=VW0G15-X4eb`k%t#VMb!)#g#QtU`^Vpat>!Y)HCi|CrydOtjx%sus zTVEaIPs*`(^*K!rNLS>(fmShQ8d4kkMlHMwEcdjzJMDXRP3{Zj#@vV$*CcqBXX}O! z3p7#%*%`ahzM8*JlVhMX?(U3WdPk2O8o` zc~94;Zaa63l$!H@r@?QrlA!rDxHNC%D%H+Vufh5)-mZE(8DT+Zw^h-`K)TuWTEEYh zHc0LRCXCZ4+|SVmildboN zyGEZyFaDa3;-{a^o33^r$)<%=ai--dOJ--&?5;*}T$@&ZH+yH1Hx8Wfaeeb|-a3Ow ziOTc9xxKSzciXu_oAcEJv;FzBbnQA$E*XmAO63b-A2zqkzB$VSz3E6jv`o;Z-GpcL zT#k1}cuAu3bhREpY0a;@#p`P#Xe3T;ddByoR<#@8lUr^?I~pD|XL)`P-w~(_ysDC|s&l7maVtj=5(w2z{%rGWrYxSIs-$4qAgQ>SnRE z-Zwv=W^b~&OlQT!D#os^<+I@vv_QWdryG}7_4z#6c8+`73`4IduZOWwZIu_=lCBu7 z+TaFc$Zddr7f*-C_TY>UcCahtVQAq+icyxL3sDR@3`NL$CavWsRjTr~BQ}P3X{G zKZhO|@5(0aL<_&RZ~IIaV(rx11DY@#y;y0k4)gaRcB{O8wqdtccGIuu;JNFijlEHD zC8s!l-<(^1o*u&Onxg&7?5aim7-jpWc{&&E;fdgx*Vgs$rOvo9oIF){PA9whOWA*A zs@mE{JP5vSV|?_T*O{zWQ@Z!oBw0Kj>fJmS^V57WEd6ArgU`OaBs@Uno_&4Io#)Iu zPrc`R|9oBZkrvw*`y7tOV{w`Bm0j;RW4A+pzQ#JY@Rfq?+OT$&O>%9G0wL(OSE2in zh7IMm+r~+G)%}xxb-TKKAEGa<#DZ+kdgntH2gqbWzT)6UEH&U3UX zzrwLH0DU^RW$(5j;bIBf;|u5ec-g)5gK@bp(0lQfP1^Z%+qQGYzrJcXouZu?yw2_0 zir4ySJoq|1vsL+4jcBS2pLK+N?ZPy(IVrYyCZZofZO{`Jq6tSeC~kE4KrZzIT?$ zhP;|z*XQ7oPZo+XZ;WiY+05TbepHy%7;nq0hxa?FW{G#cn4#Z+neIosC|dY`>(Y2U zPXl$KyecaiCg#zbo}N*yEw)yUzu0n{clSU#&C>r9kjU+yZ>+nAgV#VN=eC9c3h1LL z8mAxs^^CY`usS)Tb7F)Ua_9y^gS^w6qot2VM7SojCN%_~rk+5}^_(0~R#Duo{c~#Q zBfi&?GL5Ha+9V^}dagtgPS6y8#Z84;r|!_yHL`GQ`kTf6Fg_^y=6HV&UfUV>Zd3Ab zT)j)))zojIoln+_R0(1y!mG@w%)vTHcOHro=B|mcN6SlI3Hx=Ml-H*c-A%)o`GRk+ zA|EH{vlERUy|u@{a@6&Bu!5?{&12;gD>wydaTs=g&E@p)cQQ+m zTk2$4Xx4KRvO%2Pue;4iKX%U-uO8?$R?ddD-uPNnsL}hdZrB80m4#<%`b)d$uWoTn zPTV;RvANyW$?Ij4+wOd7_DM23w%2O&H8|Q|m*P<_?f07=k8~cA?0ml7T~3;9y!SAl z74zCW#av(Eut^7pwDRMB$m>qJzNjy>3hI2<N3q$6dwocV-n70&27DaGu>?;+5D{Dz83s6BFe$=HXR;4ruq8uwrsU9 zircjwFLWg=CfC~{OupLd<=rm6wnxTJ;AE=gm%U4LNc{fz$=CjWpdN&~e2f{%gw<3H zsCb#Ajf0i+62BJ%O_{&;?c(Ha@?ec)yxrJ(d@JJJWz)4|MY+FSuDB*BusLu#@3-zZ z2Ll1Cztb-q7W^+4WT z%3aUS>zxs2Tf_I6T;z!EWxq1<+^+=eSN;LO{@jPVe!buL>e?%{JqIw_{?y?4-dGe# zoSirFAa``@+cV>{@=vbxldBoCzCC&->tVI^N8m63yFXxmI5?)p;(gFAcJrPC;6Wcl z?@YPU5A`eirmFaBMo*_Zv@N z07XE$zXtQUHfGLC9xxCA&_^`UOa|D^n+?sAp{FHduSh$+P1f$4%IlH$PTyY0#$J(( 
z-6DCrS-RGJ7U*rVf6)t9ZWcOr+a%T@HIPV5nx|(gPYX*VCgQ09VKRdvI$Ljcw%(D6 zYPDXqrLVDag&4aS5Mg_?4h|wZAgI4gMja5sf-G7QCQQs&;-JzYSULCs&6Z5H4A}W4 zE1gX!&fxRFP#!xh7y`Bk+^~5V#oX6`tVt^nR*lIJxQlrif7>iUn7JumO$WX*-)Ky1 zfN{%cmCzI_qAe;dAg*@dR|CisLwv{>FAqo{k&AgQUjq28sH3F2wwd;`K=+kU-qp|h}0@vEgLUl4-0x7P$k zTB|O+kXEKh2vL+tps72(VoUHF4+&S(+Ngof7d}Z0f5PWyji-*?mbTH6QV-T$YP5;j zLJAqBIvLitTa#!g3QVM$KDhnbSwP`n2G&My&A$XNnwD7-|JDcpJ&sWB=GJdKjr2F& z=vfpYk;wWF&OSIEdf3hTHdz$uZSr;{9`kZfx^kKr)FkI>1Y%(oM?s8CU0IOm7Bm6N z>NT)(e}@FUB<~3&U?E3=g&P_(BAShpM+?jhyhmzu7##;XqZqaHNm@4yxVoV}Gf6Pt zB0ewu;bE#H@4gK9u+N%tDVm@inEjL+(=!J25jG?Gk<<70+(s)w`#DgaeCGYbKkuaH zpO362Z-n}}_dG-eQLKQTX_v;btcM@@_0G)-e?+ZPVAh%`$g11`aYi)7J?VljfI%A-bdJqow-9QJ$(7QAM zQH#hg9~cNCk7^ag6Kl@}g5>xjoFMdd;W}83JLLT?l-6k=;Ns$!JYXJnZ&PsO=1t}P zf2L_KQy(3bj=V0pSikY`_6oP_b(Q3gUos)>80H3UxkFHWS^8z7``*&d5`sgr)7pvW zU_v2ICUzJdomt}(q$nY45Na1&;-h;Ee{KMRmP-Y`9EF&`h(&PeVeG0%&?WNc(v+LJ z&o3sDIXX3fEdTj7TwVK;cd73@CyB+4Fvjxem&i&|?4_+>o z#AA;)dPFE)>%VP4(eqdSqqn4Nx$ED$P0C$QgFEn4_L=Ef+S3QFWq|gj?RM<6e|s~X zpk(O<)CT6SsE7;%5k`XSsfe%{RH4O!L@US!bHyb=It`_=My{uGN?A4!FuQVl$Pu*e z8T@DotYp3*EJ5{)eGh}Qv;>0haLe>7)M2>S&0ImA-$D~x~iK6^s`hYtC+hrO+o{KKsNx`?hgBw=ARv1aN*-xf zJ!H+rS7?kx9gugLXoyW#IWcI!bbF*cH)~bX##O$(PzUpDZTg(*Sj5Yge{V>pn80YK zWz>M2h<)zz&|tn5s|SZuAFbTvlyubBD$O_v1_I{hprq;zYGzHQF)-1E`t#R&o+0gKPV(Y#jAfrd{d@v1_fVUf8nM=z`$tU;w<%n*Rmg* z@xjlx?xeqcxZgh8HxB>8-5<8+7oYyvvmVx=NaqU!k&Z%LLxfUXxbtN|#yU$(649M5 zqSPP8S653hSLqUi=nSShI!_B5r#Ir$uVZysEwS$DEVT)qzxDB4*E#E!B3Us%NYF9P zFi%&;d+y^bf6WPtf=J5@Q>X-3O_5Ln%DChm(;N;DcWr*!y~Rz-l!=b&y$SP2|56ZN z1~cJdsW&M{IY2&Kb>#*F{imG&?I%ka{*5OlkeIzp=gH-p zuT0%%o8B+~_VYe|58b$MHn8u;m`5gLNy=K)!Aimnf3Y zpKYaffKzIce6`jDCOoQ0D^!y=0q}XKVgGICkx2^V8Zh@s=kBDx>+%wt8BI;T)j*UQ z0v;siHGSwb(mMqc;LF=|a@h#g(+5*sXQ{Q)DMoA_d&RU`F&-02z5-IFHjqpJ?N$!br7B%&GB`UCk+gBLvKSO7AlJQEJ6kNf_T=j0dP>TmkP zm)~)Tj7Qvp{^n8Bm3~#0ZoD4vfBR#-Z&A$G2ehdp`R13xD)r&ztZ^9PxASTaN#>|19}SAAa-m<(Hq{CGy_cL5Yr0 ze_Cp+-meJ62nh89y2Olq1%ZoUB)1GmrodgQvt(sLN;J7)wO{T@%T&it)ER!ApWAP*XY-8;mrr^9cb>X+?5Lfg}&r-S!q!{79ArLF0C@DE*T zT5o3lLw9uPUop8CvOZpLI(pSa?bEOcfR(FdJ2x>T7y+HGJH9ixiXf~^t0RvHf0wm^ zS%COZm$p>H7I;h`sTcj+l}G(9y}Ta8jA=9?GhZ}Inv;WpsEgGm*HBhBvpUiIr~sbm z8VjCXoIae4AnCiRhj!*F>_l)>_`K9mP&&wOoA-h=Ao0oQ)&*B1ipLHMVH4ZFR~gfE zpB;?15@?^S*I@89*HUf|=-99|?Ix zcD|?eaAn2Lw0)k(R`iQ3ovsGu|nGuZ#n01Y=+mj)Rw*vEee`DrKsNq^8 zBGSyfb+=5>5JI!3?mIjebKsL#?5o@>wx?Uje0h+YzWQ2#qT;(eaFC>cooMD}@+f0^ z*FWE9H{Hv)x%Pq?L!#29ufgU`3_SaJ0{!TSIRFz%v_ouiCmjFpoo5?>SP zP3p_zYVicM*O!@;(`A0jVwJzGHK{X%&*F=t7em&NtKqY5avm2q?Hp7_b2AjZAo{CxfqkO&|r@A zbuE^H=TuznlU}|YTxJN$k;(XRFs@dZ>B^>1MqVUrsC0tfn2B38t0)s{asP#!FvwZODGbODE{#rL^~>NahY@yh6pl@HId7XynQ-yua<&Z(l3@ z@r!=w@ng$BJpS80^}m1d{D&47=@;ni_j8}EaE{2;!3RA?f3Ee4Mc1ICXIPbP&fVYo^C8Y10j?Ip}NJa^?A#{~U^*ohy+71SQ6=n}W7l7^_ zvv?oZc6$4b^N*hWga0Gt4;&Bwj(`8qDSzaytS`oo&N3<^ADM@8PgOQlHOFcV!gsMmo6PHP5e**K6&srxdTa>{{YwNW$=Cv8O zY^z#`hG}gHwTe-SXa-9SxeP(-X*@{`ocOp^6CJ5ryNfoauUs&4Ev-$iArcQ9xE!%M zA#L)6j1a=3jmD%66qI1P(=(T88Qff)+*c;l9utga%G=0>mLa&r^La39v_?4ch-4`b z5As+3f1r=OMpR3`~!KRWtHX1J84p05M>OMl-tR(`7KMlxe!v4yp_ z#lCt%yiOo5XFj6@1i^?$p%K6;d6PF}yudsSSVt4!Wv&}@T{}h;F{0rjJ?daC2}`Un zoEI9WQfGvS)4=H5R^~<_T{&1shTP@XVs40$e=fuK5irHm5XhLN#ypV+3XKL|TmvEV zdrabCf`r5a&40()f7toIY<&Me{=31MVv=sjdkX_=1@FW;o-5t^O$7J{=8qf3>x7YGOqXJ`-QHSap#?Uh6=7qhyVyjC!e_ z<(?LoFry~njOa#^K&gbfCiP@Ah#T^?3F$I5>bJ!b)974{w#uCF2U{VvY!}&pArF%>?c9Ie)}Ca@_HX(_c3B;}d^!aQctkc>j7oeeId$CVtM);7NYq3tnhWA&c{xNc%u`(l;&1(?#31mdMHZWid>lS7%4&F5*WSCu? 
zt%XEFQ2w&p1h#|MrF2C~ET$U4lp<4c1I&AStpX4&@AI4U-tR`=dH0`K!!6t&y)EtW z=nuUA#G1cx+D2Oy^egF4>`pZx#|I9fl(-01`OFn$&g@7V~PPS zZ>e8z<;0eH>LX^~A*inie)S27Il&VxwC;0QI?hn*_k{~{H$Y*yI)SmOj@W?rv|{1BSLhASQHW(j^y=WB=yQ& z>Xj&~rCvdwT+vUSedVkE64Y-&C)+jHtw(d%b#&ZAD`xbKTs2v|(OT^mEc(z#2ph%(F+jNa}M32Hh% z*W+Yey#{n>Qx-U|=onLe}UAsIQ^LanmN!2QW3DRL8D7$eV;4*Z?CM+}aQ^dUM*nL7?l;>qxa$7isEhvkq9hk& z4;zAp+*(1yf3*Ns1tKbWvDioZ+6dWxh2DyJ2R_AKX(e#ca51+_)}(kQT$jHc&27GS z1|tBgb7Q;lNqfJC0CR2nFFz;I29whwMNz2*KRb8vUv*Q^z7JZDlusEzJ2SYzQ-BRt zCBFnY0Yp&E!D8-HO7u;xk6ASG%Md*HiZm=>2%{8if96JjOFiLmhUsN6g{2MJT(jkS z_h|!1SRLh;DNlec)C?@{ID($%e-V$fd?X;~1U1mv*#ma4o6(Lu8gLtG z1aD|r48+nJZ z_OtaKkUgxnbOsRVStZB4bzm)Vxei7G1%mql76`}L7Gr@FkF6arsbUL^J#2KO?<2t@ zfpYP|DY$oP1T7j_m^wfuEGpx~m=UB!o}To-O95-7Co|CPobU!RjnbOucR==xe+U$; z1;Sf@Z6Ich^k5zHVjbvP>L7@QxqzVA(>;Q9@QZej2GXX1O0#E@MUj>e`e=YR4U`4T zT`n_(>}&}6wN?Xq(SXhyNN>&_EC)kBPp$NdCoo8I-1plM#MkScfVIbc=QB^7cO7Ob zFeS!zZgf(nzWK{>oPWxKh6Sf2f8q_0yTmh_0kPJ={iH6E0F~4M{-GJ{8x2YPa$|;I z9hZ`>Y;DBb2Hc8{bn-I>E%^6+HrSW_K;O?V9GwMpQISMGk#l$Uu!8k@J**suU?|sOYv{z;19ST418-oV1$W6aoHsBpSalYJcy`Z+e^`2XAO0bM z;0;3g_6{iNu!;Z`dG2Km(zOI*JCU$~hiNwwByXKq&s${pWE})TTSvtaj|AyjCyFyC z8h+RLO(yHf=u@xbt_5Uiah$g#v$O{EjNQSvhi3s~DIkw=!{*qE?3Mtn*d37G^MhMO zdfoydMa_0_c<2Mx`UO z=LN-60UNrffxdP2f508ULhGI_fThQJE~7Y1$y#W+#D~($JcH;}ZSF>A$-XkjAwY5SXfSOWM!1|G?Yb6nkn<}YhiISrsA(AM>#e+NQP9_49ETX6 zdWqk(ueXD)#kjA(IS#Qt^=d=4PQV%-4W2jDz`~Gkio2f1f7-|P0!kcs+BtbN8Z-@_ z-y1xa`m62rR|o%|&#;#*vD@2nN>WZ8ShCSl%4AL*Da*NF2HN(t8bOhSeBaYYqkr%q zS)ww=4Ul!Ckz2zeq(^b$>gAqR64q_Nft5Y`{|e6S6u(c5{49e=U{{QNWLGG#Cjl7kq`0`S0hC1rUj^k?=b4)ZgOKXuP4C;C}b-?L-4e zleA(ZS9SI<>2)YdDvDL^&j6(CLLOUreKo0)N`0M3UrO3hs@UBU2-1Bf={lA6M8CtP zx1m_t#Q4)@Ph23jz>0g{CiRncz*n@E&zt?TO~jote=YpZlfH#P6}!~{;RYU>-C>bP zA4A&JIDaDyIz`Vn!ijEF75>=GtF+Csy-7!;@XuJFaw*{auXqeg+x+9l5H><3oq^6q zdMWXXh?g0C8TB@;0qbAjo)lF28aOHZGf|ChDRAi-QU>H$KI70O{~FR?{8u!P`UHZe zXkaUKe><{gpi6x%{fd7zZt3qOA3uk0@lyJEd@J$dHwBH$zs4hV(F)7&@OMJ*JWP9g z7@aAeeACmNOTJkJf513kOWjQ|GN^Lw$CiZr(P(NTd9^`l!S0}0d%#xop;ZGYT4R;3 zuq$zf|E!}9=+<7^tRL85u%; z$N!oP`?G%|D^fl>Z0Bd`hx=M$u%dxjOV+&)N!W<&`?4DUivYBbhH#+C(EK~wDRWck=N_ZGAepKUDrht=D)@l z=AFROe;|tF$Ccwy0FuaS)?Y{cweL6-ecuoI>(Bl01E0$4=mXzh#~=9qdi{a#f3G)= zBl7@Alh0n`?%l^2UGr%4vS|bXAg+YlIgX2@|JuWK5)RM@PBe|o6GT1Sp@###huh@c z8yNY9n>P&y@TCuDfV@VZYtl5FeBY`Kl4W)IOQ|{sE2!g=Kl>R{W4A5|Iiyk z`fn@z6il`?iKPA9o*^XR)4Z3_zD+}@gnvta_%}S0KHo3+L+}kB{(>L%{Rho&-C_2; zy|8BpWSe8p*UG&iM8Z!#@K3>V30uqlf5Xc0hreLw{aAj%Dze|NKV{}5fBkq(y*EVq zl4#KPoj(PWbfWNqpCK$^x%BD7A6Sh0zJI~GavZ;4vwqC;Kk$h4zVGfEFI4I$82sQR z>pt)woM77b{R_5~egA^Z<(R`?u#V$MizMUDT%hdPA)uNc^McA4FRrjX1G?RVX6Z=% z!*_gu%vHHruyzQ7WU_7Gf0xY@4Fi+ism$lyKG&{`K(4qVfE4>pI7K+WwE!7@a<9*Q z;!c(;{N(7TJdxk(0SQ_Q3>w9`e5sv08E9bmqzO36bv=0khg{FUVg0Q!BNs?Af9=$i z10X~&O@{ub9muU&tL-{IzctQ~mfuu3ed!GZUc)H(j;n7W0iXQaf1?_p1kAvQV9RW| zUJUn#HQWb-nULQX_J0?;ew7hG13v#< zW(fBuH2jqL-}M@^I!Kb=Wey4qI52@;U;-Q&>cA*rD^Lw4lgY~(3>Rz$T;`8m*-|Uz z_m-W+n#(}QyrvJRe*)8m9YYr|RG@%D*>1;BCCiCG6<{InTc0vKW!L``&uu|te?C{0 z&kZEJL1526`P4+Nr!p998G3+O%KA*c&)+hXfIgSc%fJwyahenoh6ET~mJ9hk=U$*j zw)Z)D$#EX!I$vWhcQ(C9x>EV6C|2coIB2z;k0YNS%lYdZe>MVX{dfRf!ANDhOwMoD zmtAlqeT=!RFM%}j`H}2*4d?{KmF4Ce51s5M1v$S`;_LC}_+l>cO(7Ms-}z_$TK^tN z+G;s2MdESWKR=T5czw_F`3t{hBJuc?OyF>M11|p;`JksSk@NYCEB%a1cO_k}^8TKq zLjF2^{=58?f3f=MUlVW;Xbffw4o43;|3g3jTH-~OaQhxFDXchL|Nr2he=)AR#A71y zX#5l2k$huP*b4ZP_;dhl*>1;>BHK|w!8d+-`W+-44LRNqpY1tPmm*bVsF-k~i&g{!&KYocX zlS#g=WdD&s7LcC)D@l*7#Ls_zF8OD_Js;?agzvuf$Lpg%pzY<1edZH?&+oq)9|bv& zMB?Qt>l21@`Pes)q%m`pzZ`903& zPkwX+e_8=aAcPP99psnfS`ChXmxNEmhdI!TbB0#&tLil&mk1Zka+s@b=5$1 
ze|CcVFAfNre~!q3-fonDBd|DJ0CUOzH^4IO0}nY~M@YFPvcI>K2amxBv{ZfvJdxvF z3YdeYAmE4L{#*)|$iWb~K576k|3824O7OTpe~*vudi>x1_y6brk~n!U3L7Ai|nTQ+!4o|XOI6-73IG*_w+yhd4|&H z&qI{lUxz5&=YJidgl?C=UZMRfOr52v;tk#kmoRQOBCj``j3Aew-jkwE$ ze{{ddD?Qtz7Kq6=<{A&Wxp{FpTb9f1qN90rE>Nx;4w|m{VK*Iil}Ylux^CNQ)_CsM zcz<@i;b~KM*ZXa_xW~K0OtJLW;5Dg`2)-zD&3vG{&9Rw3xDeS_HN0l>J=~1FPm6Fk zjt|d!s|2GbjmFD6x)F4A+41Tq3&;lef9}}B^JW~)GQ7XtMvH2sl=^v0CQjT1*tT{ zo@leJc7wpQ{aMv2i`i@G2h&Bpe}9YU`j{n~1ojp;1VGZ3Ll%KnHhVon(@DcKe>EB% zU(G2R9IdmP@_YfQGb`g1nCqPu(`t+TaAwW5bU04U!OXsuv*#rgPKlS#{@j|P*~u|UPm=%UPV=hxwmoGj}!!|JGIdT}@;)7vrKC5QCxd0$uVf8ot-u{|Ef zvo0c{yDq0wD>CL{pzQ|xEVEyS$$KGO0s8T&-)!BTqRXC{b?ZOP+9b*B)j{%5A2rgJ z?z##e;q0|LyD$dA<95XJ$K(uP+*U3J&PIDp!}Tf&Oee4Qyc~4nd|Tff->N#7@1JUj z;frq9Il3$-Iz4E{IHd%Yqk%qbR>2y!-E?)V$TSyPlkt@9SkZ`E zt~9}ZcwoW%HpkszqM+ySJl=hweNk#lx0tTq$5@%kRUc=I@0O$?uk+~Zi3Pw);wod&8Mx{}!18c@e=B)Cv7fuJqgLBq zcMG*Dmb;j2b`)?CV9F0I$(w8BdOGqLv1+^tp34VbpyTf0l?hbEk{`pAxY0|w-C|33 zjxTiz8|{4tV_%E&!>yhPSo-6As@CRVLmHpwOROAoHMmR`hP#~t)<`?% zb4Z%^0IzYXwNJqHf0CmlSIFxT)SA9xyZvgvBKA3`S(0wI-c@pMf5)bEg`rCJm+dkzj&x|- zXUNn=8{NWfrl}cbYp)vO>2;H$-3xt*z4%(3Qqd%tVkeUUF7?0#cyzaN4IXb3y0P|& zi;HbrG}!^IH5+L>Enl{3S%|9>ZF?VkPWC)GZuNDCW7vwKe5nqY_t(jFc6j8(`O)Jh zUbHblRZJj`e^&=Z(2AkW{(WQPih7Uxfgcl81%pZC7PK5}EOd8b;7g1l-UT%t=3&+i zXj_qhfZPkNg3`#T1vkSH5fqjVvkJ4FS=TyCZpbh0#aEI&=lZ&uB?lrV!anH9*X;@0 z)y`ke?yfp~9ABpgn=ZMam+OKr8xt+)-b&8L+cGn_e=^+Jna)~eH+`543B}tjb0@n@ z>Qk^~A{N5=BAFepuK6TPqGD~|HvY^>v;do{>(S+Q{g|zT^-!Fb%y@l`?7Aed9-F6N z*{$!XGox|l;5M2v-JP;l(+m%lUVy=B{(HHctO2M4tl7AXRzU52pAboVVb**KYZ!k%mn z!-X?<_V3$tw?Xd}0Trg(ql(H*)o;7g5*Zx?$z^eKEYfcL*;Os*&6?}2$LeVtTX8t5 zs_3x&Qdtw4A#Gkp+xVj~-Y)a(?w!%&v}p3Fe|{JY^T${E;>&E`mP&R>-s1q()MN?Beuh;_r9*vLNuMD5_ zf9GMHI}>ugyq>d`hf#QVlhgG0ZV!dCHqxcLjJa`{_B!-?mOcU-nb4#nUm(a5Fby{BIXqrDG;%3S_e%_+!e?CH2 zHFmSOjmN{l#TUMx+udktW}}(O_NStLyjo>BR@WLw&#S_4cfgA&O>h+DJKwv4emUrP zY&F$9A+YT%Ti8B(jJhmwhk=N;etY*sz4zwV*SOOmYx8QfSjDDbxELPx9+^Bnck=Y` zfE!bVYr5`%>5Twe@wpore@p7Z1%`y|UH$e6@FHjh)Nli%bp!HGED(fB4eFCgZo* zTNLliXY=KveAO1Yyq*C%DB0PM(p%=v*e;UCns3Ya0;jL>u_~R#aymW;?pA7=DEQ0R zEf}4?jJj6r3WrT=Tu1X$fT~S;_mXsz<8#JJ(~oW|9`EJHuVToYuuwTYOM-S0{pfVSLJ)~)cFe;W+CbeFEIV3LQ=A$fahx)f%`vU;A~JmZ$Jak{AT@||b8 z)r?f=_`p9OcsHNizXt2_qLQ}%fwaisX;UKI9#7c>y5kW?#4VxlQMc6c>P6b zrzQHWs?qiCN|4m8ZmJdV!s=%sF7Mg6KQ>@Hv2U^vzqieb05*m|xMzzV zjhw`Id}U6_Ms#Splcaen7po@}dbZ^P**8SuSGjQ#U8rEst;@lEb+R0)+pAe5^a=AC z8Y7RkT3$A0BRw0!%pdeGuo9-|ZU%TO3mGz%)p>9MTuJSIe>be25&|q3&jd0DV&+^l zR#l-YgF9!B%vrb1+Dr1PnIzR_nKcKeGkP@2I+-GZGI!h72u~LIyfs6ZWb1+A7n#Hp zwdGhFdT*vj*??oDQ;#A6ag+4JQcLAS0)FF{g5{~Q#s{8<#yRqN_oRkg@sbOsAQZ-h zl&OIC(c1Dpewf$G0(@Kp$%vzPnJAME$1 zefRW%?BU@eI1l}Q&@^-=%p;fYegZZcG;DM znT}r1QwwdbHE$5K`7jJ{7`gZjbt&I0c1*mpF>6i*eLow>*EgbQoxlO0-AS*@ z8slDF8^#Zxq&KL&ZY*o5Srrnykh~IC)6$NIr1l`#0|Q16qD~E+fUXWzpH~}@-dEWt zfuP<$RM1{$;B9_%dgYH1VDXp*a6qPmUHZV9^X2q(G_Py(jUl~th@!~7__c5 zDR`fje-j{!IGytZo?N93Rh;0Al3v5}V~F)SphbFhw|h3~dX z06a=VZO0jaHwlQ-gW^0FW?PIFP3=RE+PMm}X*WwFb&_v-N_L}lG&fME`TI!&w2&E? 
zh~%u5s zFNCatWOyLjNJY`ZSYY&`={D+rmj0CD&;|q{q^eNooB8@M{16lK>90wi;kcMlSXT*s zzM8c5avBoFnXiZ#N7V2T^V|E~KwGr%Mt~$t&{)booe@kD-{vMG1J6m8`818%1Y$?8 ze?*5{{2O}s1L<84Q~}~*9)a_5$s2h`2Fx4x!DWoSsfB7&Qh7Kin@QA5@(CoyZgZiJ zJZ|$x{|NC|E}CY`ci))&xiIz_Z9fK}bi_AYP3m5+)bMy}3~ z(GcUNkY14Fr|l2#zQnp8MBPM=++fMLe?X!~(Luz6a1lX`4~glM?5vNPkGN6r$iNH% zd8I8xWWv1@R5HOILcOlw!I$+buS7WJ0mF=4(qXrhV_Pj_0**xNa-tDMpsq~bBJg1j zZUQnsqaWOcBOo`wpI}b@IaAZo>~#@JLQgWNr`JvCo@GjL{o5R~27oE5GA7>4e`AYn z+?y^bqT!LHMw5gxrOet`N63fbD8;`?*J-B9%oN3-Kt5JS;Dh+rK^SC?5yvspL}a0P%YUt^5a3I)fp| zpf5jt2oBI)MuNP_5}9P}w%auM>lNacS*b8HFuU!c6|FYc{euzv@f9IlMWyjd>u znq=PbY@g;NKHHp3y##-@&~yULvks_g^VLXFQ=jJmO9jZqsqMYW{o*kS zy(BI0F%L({u*T}}28tDu2|}fiOxDK)_Gy;ezcbga?-f7O2O55*hT`@%G%1zY7L`(8zbZOBtq$F0UIjEzTU>VUQr^371q zrKj!{2M|83i>6pfodNZh_Hwa0_)QL)pIyNUn9+s`W|aBtF`J>t%W!AE%%?JqDyh1% z_u%MYdWaE$>y3fy4iwD%A4UNLPr;azt+>JDrXNx|zRJMLLXr)-N>I4hniE7S(j2Ix$@S~*7a0DMd=R2=+rQ^@&D|-)N zBov_(hL|Ntw$>C&{lSRq?^}__2f1H5GQ*1BT!hX5#riMH`Q@q62!C07C z%cnZ+G3Ar%DCAC7I;vg@+d2B$z+=XFw{RDlkHbt@%Gi`Ivd!K7>jjNM=0&6p9=&`wi{%WtAiBDW;Xz5YNtRHRSC&{jhZ;<}ee^!Kc4&D^-Zn8@<>Qk#&nyCmRd-`HJKx~y;4HQ@%z--@$epD zs`Prd%BL-UTX$4A>zKcoWE(>srNwe9+vn3{a#5qp8D8Y zZ1P$SLxo7yvx(aaRtXP1_(k*|x`JqnVMtzV+R|AAq{+N9O7y`x_bBl}!V)J2);wgp z^mHmNfz>5EA&|gY41&ECDH>G6zQaheU7BBu^>Y&DiegFD7ww^0G%Zz|?Itqde|Tif zTyG0ecWTpV=Rt=X9Pm+CKf*8k9eu8LgHVX&z5`A-1<)i8GDddKQJnM)O*cVjC)h~Hl zmM+%=}x0`f_!%eHRf8!imxFkBWn9~ucO7*2V z09%sjhKs8a)IX5cNoshEAmKu>DvC3Od0}CoUuWXOu?cHc;LPf1#m8h#4>1 zc{cgpS}eXX>PCB?udvc)kK|!Cd>yp5JIRN$dmj08IK^S+DzMunm<6+Nv*C-}k9Y1o z{nTY*4evNtT=HzwF9p^zod--9IuAT7tDmMKd5TXYd(U{j05f&F2JSMX;srW|TUr2o z75g*?qs>eAvW6qbSCxAlf92Ot81xovi)=<+CXuuJ>#l*FT!RF%RnB6<2)V~LSQ1d= zp@h5?3lB@ehe+DKWx7uUx~aBo&GQ>u*}3Y%Zui5~%R>5PS~B0hV`P^?ihYEmG4a)i z{#m=7`6fobWk0h;CXqm)o}5txl{19A&Va!$+B4$@K+13zL}2vNfAye=;#-j@snX1sSPrb=W_(6&Y($f>YY{L8(c44b z_&ZlsysRcebQOKVfA+_yv;pouI;cB`V?&Mgf&Z#{mHv2_A5cYnEm#^~b7(M)JEK9E zCWvdW2j5OpE6z+wC5m!p-oN2h_kQRdnQ6!uN)bXc^ezQ`eTsia{`6%YM~}SVR1_k^ z7eEi?DCbEazJ+er05d_Ul4@!aafdFaWHHhgn@0UqaKN6Ke;{@YClM1-b#U8GngBI= zD(D>m_A4JtNW03_rL2J$cq^+K_QbYf8Y44FKRP7FhwqJgMpbX~xsPIZC3X?R12?58 z?HcjLhuS=#!}{z(Hpgc;6;Gu%x4L-3GY2s>(VOvv&Pyv7Nfn{HxaZIk_={5N~ zw3XmnxBJ+ie^>?T(wG$rNqEkrJW))5{+7KMfbQ-DEdu_|)6+wemQQ}K?_X=lZ|%QH zbijVRIdUpvn36;o))+uEZ@%c5fN%$d-C^sV0KNkU`uLt7`jUoXHkQ}bfZ})5C=vso zJrjJ2Bd#@pD9#)I5bT{cq(PYxzf2=l8Xc2u zR(!y{q$Z~X#$Yfs-@c7F&3b>Vg*Bq~8WV{32qUx_iT^Q24b~9Vk0m{-tNod3zcFI{ z&_c}>fBP>C3Ww`B$NZtjhvE`-xfJhFwr=$ZJdq=)@vB}jwIbqC<#Paq6+>En!Sln5 zj#{frlZkmiqsz;icGbhsChWV!8?J%c0=i;X`_!walc~*_r%WVHXB?hMXLeV9?Lk3~ zf&h`aOSi4Q>b~Hxf?*#Om-5qPj}#Iw73Epef8NGP`m;eC+8obDB`W56_1d%NZ1$(N z-J)>mhV#f<_3Rl3zUju=l*HUf;{DnDF&PxEC4h?Jx7| zfALQ%Z!EmT8w7))!x4V~?ts&jj7|J4R>-i{r^X%agNCGgZ)fIbypj9s_UEo4~&^bhfJ$O@nnR|kBNBV%6 zSlVlt2zruv=O;<$3!9#pYQHwmURV9Kf9opw3uI2!T@iE3o1Mg5qRB*L2|B$vQ(*qI z`nH+ctKE@L``!5ztC#vHhovt5^LK89$0Pcp5LHF<(2}MaRt5ySJWaZIZ4Z_l@VC^~ zmk4v(1>FInb(ccq=EVSz5t0#ab>fI1j&**Kvbg0aGp_OrO4kAosYB5iK7zM7f3fSt ztj6Lgs=25UMS8p89T31WYT7>#nj1yOQa7L7+#XpuvJIb&B)rk4nlloN$G5V~&vFC% zD`YIQSR6aSSP9<}O^Q|23aw9%^6w7*C1zxJETr-eEDHL6kfpy^Xv|1D zlB|&bP^z$K^WVJJKZj0=Jb3zd_=obc(%+2J{Fn)$edaV0|F~iQpYi#Bf2M*YXv$~E zKZ=#VQ}{=jBL7ijoFhMu(Pa`0Re#e({>dpo{=TmTBxcTX&k^&_rsC26ku>o~l10H$ z|HglnD-5d#|NVRaC|HVo6pz$l80jDHi}ZJD!Y)Nq%cysbnt%Bc<@g_lRfb__t5`o9 z{Zp{~y*>WPW9a|7YUMvlf0oKz%7_UQiMjw%K&`*BZ||*h+$kh)d665OsOy6BcnK6I zeR-*YhXBa)f4n?!n1n~e>L}T=kGpD%ruJ7zvN`4EfMzVr<#MhMpy43qES$z4MWV>V z0VDwCvw}in;jK8$>vd;B@-ydHOA-_*Ms9&1=8GdXG|J&vl1liopU!qTH_qtteu&q*XP^m7xHvK^D1kMSgd3WECDrnf z(XLDJD=FIOxX%^k?bK!$dw*1V9a)Cm5wti!3ng73-(3*lObKO-tQDAM2vJI$ZUL0^ 
z`EX5)njGKcE`~1@+a+>m;TLKkUWxZzG|%)X_gl87oNW(=Vr$z*F880>V(5zFD|eFSiD21Cq>K} zhKtG=MuG6Nf;c2cTr|l7DnmF5Q-ZxokP^||+ayopDXC39zLkPk2rSMmj9t7O){|mV zOw{Cg6JN{X91q<9kbj{~)Xo7YetrbY%G|u#iUP8^22e0*s{qY2IgE`?*mlIOuNg6a zC4Zf7`JimgIHE97jhsv@5rt2B^von(=<2r{G3Nl~lrqPwOZPL+Nw<((t63(p<7WZ_ z)sW^zr3bUC*XIjCR4#40LE(@HHw?sm(Ys%*p%geNy83x)S$}!T7C}%%eHo_)31yDC z9L1|tnKeH0!^#W&b;=;iBPL6sh!GIP2I<=jar{cn4d)GN(R(!O~BL?Rq=?A7?#UXQu8wTRch= z?%UI=>RaL5<)?SnlXf+<%Md(PsA1CtMlCi8kx0dlh<2#3Sktean`g&-a(b4a*fJb{x2zk%?Hp(==4cvpJLqG@OvpCjMg1&cr#D~ zD`0QG7`UN9245Ae8$lko4e#wVy@5{MmIW;|+GV(@jhBFv+>sh-yYD`CGsJ+HbLm|P zp(*YcEPqo3Zd=R_2R9xJ!~qq9zv<6nXc}Y~uF^2AlF(U;-Vn$ z$7IOTz%&TJhu7QU`-;~#XORmZ97&u-Rfd6;dQf-YiADS z&`pg%TX-0zUSQqnuf%A$l#K7Zip?Z};H!RE;P>>06YwMMp{TZ8;@pW zy7_uq=mhpGV&M<8$l3^_6aaCLzrzFDR##}06I4GGHL8+`0yTLaui?cKTMt5vga`+O zU1uzm7$0P5*SKE64ig$fbUJ3H7<26%gooYFqvg$wMEpDyY3C!xh8kQk`$e9z>VHGn z)s#6eO0jH4Vq42-Wr}XD-w7_!!{T!4@)v~Ne1Ke>-E43m4PXqF;lN9y+X@Eoq2xbUrF)39UyL)L%Bz*yrXH`u?KE^9cLc+tbZzLFUt~1 zf{=2{`(=f2%CCG3DBbr>E!f`d0*e9LWC(>$j|kvSn!5PSr3u;oN^A_P>TLpM>PPO8c`Gv~qAjB)yDnH>BQ63&t$D$Q!$6tgp^wk63w8Vw31a|LQ4ITmlkB zM5`A443zhQ3$Tp)oY*$*k(YAdSowr6@e2hNNitvKF^0X5IIa+2aYu?tlsu_>n}*XHD8(-3 zLU9OqMx867dzqJTu>334s@&_83*v<5)(tCRlS$VNLhd4-p??uecr1@ZKd+kTXRx1M z*l&((w-qn1vF2guO|?heJ4sU;X^n+PK)n8&?D}m5Pv9R^k@rj)+!Eklc6(G3n13k>P26PQR%URX#ccJ*jpsyT!Gq81*e(6@L`1%)8eZ+@A!ADR+kQ!o zdS6i7i5tu>BfYG;hQapKTjgLckqx6le~_ISBcCxdj*9{P z)FWM=BQh)JVkx(SsC;pQPR)P2LmR2HrT3UNlKgVE>woceDZsf(T^T|Su-TqNOd^db zshbh!{CEYPQ6GnpA@crc)B3~>zU%Yi?_Xq9AeQE0n@kyM=Z)zm_2dW7&y{QW1AAA& zVDm;4`u4kzuf{+n$ zN0i76q^IvRAy{lP;Mrl$SF)U)tm2ulgp{^^Pe3h!a7o6f>nL*#33SNoxYtiOH)Ktwaq+)6v=&5^uqn`Ml zJbz5v%h0()J11H%YAt-|WO(8)4YW7^@mrm@W#&fANMY7ZpHFH(y-8;Mkh8pUsm_v- zDVgU30UVV2!xtbCRve^=Z1r$1hFe6W-$;^aYET4HS)49njkpsW&dZd@E*VNX2cy<0 zHEuoJ)7!0c4~Q>qfHdLLej^z)ZU>NLe18(JER(*kML>XD*k#1;MJMF?$N>zj?6%wH z{D;K8m<7_$g-&AlIB#6MRZ1rDM@v{|o2s2c27xEijQ-*g>4BB;>F+QLjx{G)%I-y3 zPF}jejD_ z%hzE=u-d_D)b3+dlvhy-6V;uDk24X)=2GR}a%1kz##`M19ULn1YYHqE!?Il~x#TKT zIlmuCGiclH2b7m2p)zp>QAMOmYVNb}OrpuORn;%*e`ixy_E^T+i^zdz7|PD?u^lr_ z^O_l6mxUv8_TN(76VKDWlomyR!G9gstPe4c?g<0otG3@GfB_z*!`XuI0T6aNMzh%Y zU*8FRfa)BY#uS$2T#k^0yFRjyDQgP!FDvR52(%c)e^gEka-lZ-HibrgCTL{XN&6WJ z`j;{>^wm}oQRksRW`C%P4jbe{ z>L{pfIDbC7^E&mGD9%8;v2Q?H;js;94bO|d$qtd&ORXI7;#}M z9OcA!5>Wd@WKkP*E~ZPHQ7Ia3AlB!}Z>#tfE+qh-FUibMe?(W;jE$aKe)~#*pq1b* zhWB$sJ|h;S;2X+YD+_-CUzZaz@>JiON$$G{i(Ci@)ct3QCNp9D)$A(uGy!E`L`W@NtII zu)6pF45FD{^E})1h@fuo7UKT_8HJ0Ba%Sz2}uu$Axts0{jK1u7d2n^OLPL} zxny@WGdxP`bg-qfWq$$@dVx{(N6Gt!=wG(UmQ0YO(q+BgJs)3uv-Y=Q?i{qIWTRZa zJQ{NfKVGTdUH(qW>DzR8Oy70mD$Sge%uyJWp=LJp$45WJckfQtY>~^azD22b*TK!r z%0W-@4W3%%QY?vHbbY{qoom())U9iK*cV987<_JV8&|=nX@597_2AA$W{Z=#W!9iU zPHrRIA`mN{NN6TN(E@K+GbZ&&lRyH+I(fyD&*&!e=l@Zb8w+8EZC2MP``#|)d?P|o zGo^<%DkMWS3>V0!#~Sup@^@k8F+_DXKN-BjCaIqO0;{v8v0I^0clCW>MDlueToQ~T z*@MS??#<1WuYV`tXJR>CncY| ziO7D$+=^=b`bDc#b}!N(h#Agc2d7GMWGqQcH!R{mL4WZ@_p5~>fFoZS?=|Ma?n9q* zfo)CFEu;RPRjC8g(-zV+6m!N&HIeMw4by{P^#ewMHvbG5CrMVMau}MDuQlg1Pln*A zLEzd-@opMu`rOTLbe3TO4o1Uo1P*Xo^T%O;NHpKsWsA5r8LevO=r(xz(`hr4W8wn9 z7}xr3l7F1&w*1p}K`;Y)cxC+vxLi$pO^K|}AwScbGY+-ZJr_NC-j{VWjfi)!xg|r$ zWE+j!Ncd9u({&^1ntu|u?EX0KM!^4=oMVNYY@Oy|+Lt-F zhUF#3FHVYX6`7l(UiWp&YfVuu5=do5%?DYsaNg9!kkM??zu@3iz6>}7P=-4YO6#{gJEn6B=>EtL2Q!?zdIcFzo(M$^W zD}S_!*(;6=425}7UN8PT(ppDs3^)IOBdxKEepp%H|A|uhpOMybpYh2&F|)vmMhT*a zxxXdh*{Sj&`Zh8LZIuy2Pv&k41aiVCZgWn zg-AC4%r8OMUm`IP7=nsmL@!kXeSOmjHw)b=FcqVvC3`|lWiEXp>DkS9Y?DT8lZ|@j zP&@+}b}OaSE%C~0cnr(ViDY8Bp+7hW81kZzqro?~#xr{&=2%V%&f|R=q=C$BGJo}R zzg`;7rph{r#K@q<(Enr${5u}o{Kf4mY{LHLry1v%WrX%)I8pIuq3uSQ1wpzQI!mEz 
z@Bt|sBrtZJ9fl!5L*L^*+r&Jsj>F(vWIe+w6t#>Qi+(ecN`q)fe@F*G>zI5s*FNRQA^jDPp$Ck7II6A>E>>yoN+_RAHxjYNPidUx_#e- znHaR}-ScN23G9>8jc-xx6h1QZAVTRJEK(#yWePpa)zE+SJ>4{7kGW6bBgvR&o{DRq zKTkiDpX0D9{)0k4wbt{tinQMMs?-*{W%9ytu+xg`x)>k&8t5!-9RuR?QlJ0z! z6DpLbB&u}!O!CX9wh93{8Go?4$HldKWgi(t2qNg0w4)K}w`2kqa`{$Q5neSP$hNGm0CL+%&) zkY=EyuwgNCJ^e59D{Bm~k*n`UJW9wpGt>a{EZCSNgcRy7Yr*SXXMenNDrXrdEJ&|s zwYlLj*1UD+OVsjF)&~xzM095#MxTDpCQgK_g_CyJFd|&5W~FW(U*pQ9+>==41>q^I zS2LpS?0fGMUoLI|mFrj`EJfa1gCAGBSSq5cu`ip)N$%DeY5Xf%0-0|}ex3+tPA;i; zNQM-JXf2CE2)V4x^?%}mXd76k;fFPlyb|JML9ql0vixpA&ky%9i5C1%_ysop7Sda7 z_HogZIcTW}Oxn5;t>9Y=ZR2CHu&Jc~$b<=O4KCqduY(fdoaWfL+`#@8NS<{hGj|iW&D9VT_L~fC7aZeYE<`J zE8#QH;HtkWD2LQd&>>W1>L9gqn5vK(nD;r<_@a{c_glIEg6u{{pG1b8p(l$TBdKqm zc-E5L3H&_+%YV7=r&FOi>-ZS3)8xf>ggMqq+;W?|KUJnqxmD<4s<~(4R7$sL0T5eD z_=u=}Q%aW#MW#-vSwkpcsl}W2k2Rnnld#2$zWXzP9LPmJWWtqQG<-ueF zom;9AKb}t?N_APlDm%rsE~sM)@mLY?y&i|wI=$Cw!G8lVB=il#QM*f0^9bKvB!jED zC?q`Hg4*BNIFk5b3P>k@b(@f*EWeA?SCOo~gi||t?p_@EfYY&Zv^%AYtMJh zjGLz#`{0~>tSv*E!(?jNARMrUzO^!5r+gppLKYPSD<9`p_9ym7Z2X~_ zaIP+qv{jgCu-^b(_slUy%=2A!9$Mw}{x-zFuI4?GLeD#$s^98%MEeQxM36nM_NXE9 z9SNTd?}=+g#6~mB_eGaX}I~P%<3Q zNm&4A0O!z6ud}knJbQIp+wQ7p5qVY_i2`fU37d@c4Vv)aybq+GHY)TT>2q4LxWfgP z^M8}=lKRTf7`pj<{5OI!0tiaeh%AjW5pNOdu#3m18Hv(QBZbW``5ox_K-zrYPkrJK z^Y+eeR+kdM$VP(k6Jt&S&!`K^ffmCaO$tE|SW&KW^=lL&n^1By5)%42uuUF&s-|f8 z=O(1cxDH?@e#s2R=!#LJpQJew=K1C4wSRO7A8L~}`Ur~N6WQ3>oHdTKmR-b&jmj+w zqZt|#8Bm-okJPg)^QOx!x&hBg(3+)T@>@i=78m24pRZKZaW%fLv{LbGjgELfhm?MT z&KuS)Y0l29eqMIHll`)SA^5Uw>P#$;?B_vGz8MIzJ-W-QNEd&bE2s2284o%e8Gmm3 zC)f6|ZoZv4b)c3s2+@ZvQo8b}j<7v{=?@nAda@>y-w#K3dt7F(xfp2v(_q+=#fRq+ zqG8E8GrL?r5&H-DCFe>i;2V!zH0jB!)ZvOcB!@EnEH~gf*f_1K2@IKZ4(AEF~>>v-ItMz9Dm7zfOeyw zbeuJa(a<%h0i4K{Nz`XB^K`0q9N<`K_io;QUjN-p>NO@8-f6sGHXZw#V(>hZF``JZ z$M4_k*O6^7$(fUp9TUlKSzU=4UlP8eYgo%=5#cs!=8+ZYZluD!xnda;pY1|LdLwr% zB*SWQKKT_PF)DkK27FRC_kT~H#pv}Etm#H+h9^ry=O`W1AAZu#ZO{~uKklf@pi@%k z`vQam4{qQ^$h$0I;Prt>3~6AOZ!hiXXi5xpHw3h8-HNvJf3ubJPpg=q7(8=5w!%~Uru#cyQ9r%Q`xPt2r|;u!`Vx{p11bbkw~*H6#-@x&C6 zoL+vLhzQK%>J~0BhTMW$SInnf?rQ~eKTF2+Zg!#YvXuSy>J4)z@-zW991B09{u%g>-Udc9X7%aaVe<<^gh|g%v+OGw>ECZPcBT`XeEM_d*_?G z`l3)HuT5&ZlE42;ZU`cixPPG7edQ%q-iCFcj?mufKI{y}S$|2f>l{=};YWi!L|yqq z*GAlV5f^f4tcl^;huIXr4+=rsXEcx@GY}J(jY9nsaVCS{&Dj5&1^hoyz~lHY67WBe zHtR1Njk(#zhmFhl1$37Fk#grhWLqNrOZNSdd45OX@6i$0O zu~Qx?CxGq@!++j?SfRyB|3UKoTif3MYJ%D+mxmGQXHWiT1~t7R9F427w4tk6$48tz}^mn9xNxeTZ z?~gu=|LM=3NdymRRtbL&ai3V!$xU z5j>*Z7To`m!T-069Q{WC|DU41rU*s$PZ0O7Ao0IsqyJx-@fY;!{h!kDWKg>T{%gZG zN@#oqYJWUf%{w#Cbia^uoWWO*Q*auYX%xkH3Xi!Vk#{grX!eeO{(?~NMS22H6b(GIE1}l7ySNk#I6@LjWCYi#$O29ad@UJ`*1c^pVm|b8> z=TD-y*MAZ{#`_;~wtobN|H^LvmstG2WWx#iKO}{)`8oeX9`}!A?>}Uwi7fux?%#F! 
zUozwWkjOQ~p8$DM|B;;jBcJ|Pdg}kDY}e@6MtFFavUILde0Q(dC!oZ~`HV+!4KA_7 zH-E8s53@O@VYv=V40it$vwPA_dGeih?ET}pMsfICQDYQRq)3UGTT#Uq{}UIYF0&_# zB>1v$zkH(zmRrbtw=jNh%&e;%y1N;D4A~G1&i?w|8q+R#*Q8zf0eBSzV%{;(y=l zBm@XY!%?{D64Y=|QBebS^>^PD9pfEit~uA*y=zzXxEVkmLh_vR1o@p90sL~tO!H8Q zEmTDZx8geV&Cv6eGBAh>Uc^HXH)_>BtXgp}P$a!<)i>_fdVB_Dgg6Cr0p_RC^nWkl z-XDH`1HXRB;|I9=5BT>VP-DU4Z_wF~?M%TQuQk=m0!*GE#OFeet>Ns<21-YdoxnWv6$f@BUz{6BC1vue1z~RgJ22Gu~C^7~3Hz>D!+e>@xq&-~S zF$sB?*K(DHlo}Y)4Ds@yQ%40%0|Au;AfRnibB|45QmImhOg`>)lo&LEQ-98MK&cLz zxP=^t#%n$&)ydxK#RgJg%BiiE4uLS_fI1MA1tvp_gzoBrwuv}dZh#l2(bpySvF27l7rEdkQaQk7Kg<<2a7YJ$1Dj`0-LB*GB9&do$i%cv`u zcC|1aY<1^BjxQ>ebCb#$;)SJh`R+qjI8fRG&Mmc+RY0XfJdo6`FCK=njbMgjT*zk7 zH~uF&2ICP>*8+G{a|D>HtkO4_>u-?t1*p@(`2Yk*U*NyO@7M4f_<#HDThfijzCkXX zKLFFeZR;DP{R=E~>b_*;A_uAgV;Dpo-*KQ#RexwVK5}#|QWyl?!DO8VeNw5s!4O8rXiyMhnGQ|@&Vf3{zyy#hM6lgj zR1t7uLI+uRlum#W9e;wx$9`wS8yFjvWPk_|$Qzqb2cv;nkfv2^pB89nb0B`vfL`N@ znj0XH6qtw)P5cH~?dh+1rhf&iuADbW?I$KU0xqJQsr`?kHWxcc^G-@vJVd|2eXoG+gI1?1ZUWfc9A#cwdyi2y{0tT286 z)8By3KLFs>7vOwdOxK)DLBJnw-EJUfjPS+=j3&03u31}7A)rJ&J)~N3=;)NLu zEQJ8;kV}=wS@A1c?KVyuSCaV#6&{aFATe|mAC(5%mk#h7R9FD4|B5xga>qYD^z_zM z^+$g-|F8bVeHHcZ`^fOs#!N%g=MzK;as4z@>u4|nKoF!4YsQwik0xR7taT_KG(b=o zN`FVARtmu6ON@c#<94$_R0A}MjZ%srWs!A_(3KJs0te$#aUC;G4i!z;uSoKFquN#4 z5S2tab&yJlg)FTimZrQfkn zyq1JZe$RZ74o=4sXub1n)dR`iU=pbj#2_px@uN{V(9vUjXdJ8(3Oahs*j%-C zn#L6s(1z-)@r=RWMB7zLgGquYeyJsP^qx6b{Me9!VywuxptdSLY7cL241Yd()ATD& zZ+_`m_|~I8@t?eP@MF8q?;P@*cLI2QBK+kSPd~c*1-fny>B$`aV&{K+!#5cFpEc<# zh38V>-cya#)KmLN1C1Q+$6zlYbyX0#aFBEeS{=c3tCo?gRnZR8cB^tp1q>5Hi3ZEm zyhw7HumVHI2ppO%My;y`bbsy!>`16X^t#YNDHF&e4G)EbkgHL@(NRgzlT#J&?(kTW z-mH(VmO9V~D1@*GR0-V2&y(i&#Dx$zs!wdxJ363>Ye6}OeW#sm$%HK3qRtFWt-!+P z`&iSB zAsFC=Qz@BKkrY>w1AloMLxwf=WS?gPAZp1B&?k?3=4U8cEy`_xb(NZcww~3?Cg_QW zXftE3?W-d8F#-Wo3Rs#}n)LF%j3DSo*UKE-bx|KtZ{r1XOi>^40nj_%fKR?gzVfm^ zwW6`*nbb3`&vq&idLlRyBLac zCvSGqNp?V-4t~w1-jr^%=;&xZ^Q~SnQLmI5U#AO?2Y(CcT?4AWa-Q;gZS^a*v7-9&1JfUV6ese%uKM_eH$QRdj6fZo{^Vag z)lJ%1fMTTG;0=`W&8)gTI2S9E5!3Zqw!X~Q8ly-bSQZ;mfd{&(dUCbZz4eNd8=>6R;4yWN^lRPUsy?-@0%Qf=oPQ_cf7efumwwjlzjX5}wtec3>F@mi zuQg#&3$3FOsEgrk^Plz9yFRJWzvpncbSD3#!+3KHoTC}VQ+BkeY88P(q z9;+(rGA!z0w6r|NLc2_BK;`x7z-Fj_=X0vga07yf07`3%5s_bJVx53R9^4Yd1!xf1 zzG76o*9qS-=a)Zx`-QK1 z^7(Nu6&Q5j?ocVgyxT#*5n_^92RK-2d^Kom-ES+SlE*5Qmo9AlASSt~N}a_W8-LL# zLEN!1EHWLNf$l2-lz>D^akcB7K)YPnVDy=o( z7`9Jc1eDyEkr^6w=sFNnEon$ts(%4$k7zux&5~fmBQI(gSv&{^;)3X1&-~JnzpR^n){W_}wIlh|?mje;Ru&&0Nxy9R z-4E6OqEl7+YyaaLV<7to(KG6uvCR|{aV629gnYmS*ttZcw2er+AOX3#Fy#wF zx|$l%Fl-RuG%|+WVx_s7$_oss>={F<>o7B}6k|9yHH)_huy$-$_)0JEV)t@{LFa0p zx@b05I@E}|hx+!97r@^BQGfcX#eH?g-qH^lkb&9o%=46_bK!zQ_nzdId7fclj&J8718IGIP83{XzM=uIG z$EZ4_!)QR1U|*%SLA*zzl@u|OK_=gB^~{b3$$}!z!t=PN)Dm^J_k`>ykR`xQ}}K;SQVTqN0y2tM>if^T{$^32pk zJi8|m7Hbd9_QlVDqQs{P1id^f`VJd?=V(;XVF?@;kc{hcs`3;9&gPgoDl~7k#>Ac> zb1{|nUd7hn+yumMX@7C@6~M62C9}!yPXruYpfegYHG+{bU`DHOYaUb|?97nd5hc2= z(EwZFXnb+jh{;;u<(q(|`uKhxDSZbut^|6#ns4fIAFH1?so!6M846*K7h#VcXK(-g zYtQe)r?;<8W9_?+6Vfjo5_>a$Y}WYNZ}_hF4BGGYqNo!GGJpD&8-3O$?63O|@BV`? 
ztxkWYsvU(CF+O=bT7MaTBB`gbBG?+%kLGFhXKok&LlvS8^`&vzfPL=Q^Yv}X~98q;F3_WV3h39b;Wf+X(sSwn7Z1Qa#GfF==l=kWebPx9q*Q5m= z8|#%8M1vwZ!T{4ZvC7d&m?(Ss3<1R#_*2*1d}#!gfG1JMmiJmI?WR7AvX@GwCQ(=lh%HNUxPB*Z^CJ35| zT&KJEjhrHOto>-DhUt5@k9+FY?TEZ@+&9d7ycSpq3)2c`1*$57Jr2}~^?_U&!-)Q11$noL3cbbqOdxuVqWG5| z5N4BeyEF{51Q0YxYaPXb%0eoe>J3Z^!fHt@YfOyNNh?}-%|dNiBEYxRmymxxDrk^x z-ua2(*jUYf#p{_a<~^Y;N1hoL!SfXLK7XUo{h6H4Y{-C}MOp2p@C?R%ZAPyuoPJl6 zZq_&)ik&;anAG~`MpF)PJVntLW=B2L16sESx6T_h@Pw3J$Tq{<>L%HUVXna?*?f0S z?&1=;UMCzHoA^UIM@ea^dQN|F*OF2;TY0mJ$B&DWmvF?UnXWxlBxH^O9|psI!d9`b z&amNyxs(NL7uPao)e^n83Bbh~l(`-XmH0iST9*mF1M`Dht)@Cs_!2+<+Q5pP zsb_QS5%3&}>r0zp;|aN|i)BimL9n3A8*-DV<4(KB?v$mdWM`H65psXHE+XLpgRkDf zf)XVbv&g?_Z${q0fcsbZeDD3aK;d22&W1gca0BY2tQE27Y;LrTfn+Fkmd>uoXwneC6xLpFe6iuHH9 z$@(7q%BoWok4-l6;|g8ddSpJ_U8PNtL1Y7rBn}^WTn=FO3AKNE7|nH8!pXW}aC^(f zjF4bq0ybdqFgvcD0ndL`l%3coTjTp?r5F>{fTHigz!;N)g_)(A{iCp5ozcR5#;#zB zxs982(4HXHL50a)`E;j0Egp0a`-a6wqaWg@sNt0YQI_O21r3_x%IHg3c<{0=xSx~a zGC;pmtE3EHq9%VYb0gpQ(0cGhata7MDHARCXeD>(Tp@N*R@bB_)P6@r<4S5H<3QQC zY(x94wBuf%TRtmC4&ycD#Qa0xw&pFICe=(1!)yA1_yp5}3(h{3RE)#Zn=gjR6TE1% zB+XbWm1zza=km{DK);00x|hCgTEZ*yJh2vb9qn1O+nj$i>2FWB5!s0rjJa~AL+tkb zaW&Ifv|ON)Mi@bxAqMeG)Q?tkqI4HIc=2S z6WlWSVZm$GSB+cg`L)i!D)wa#vuvBfU|Q$UGPBs{Z$TUcPzaWs&~W zAkiEH?W=!YmXB~p<=4ec`O#t)oe015xS|@vLQKIiEoV65W-PcO# zi&;qC&ol8kr^dmy;MxJsOuFJ0(-VKpro1vzjm3Y^>W}Aw&@k?v>k0&)^Lvb`Y+wWh zZhf}#dZ7b271e$dXAK?B=v9Vlw^fYFl`!Z`tQk% zM3R4fOD+gooiBq693T^S(^Z*<=LnHdK{!0g5|~FHgQ_T&)va6X2TRD!h-2{pvkABl zS0EIPGU6XrtEidzT#XfIQ3-@Ybck?`L!-8R8ourHnOr-kXf6@)rZ+B{_ZX2P#`CMT z^F%i|rOf3xpS%Zb5a?m76x#@;7F*)2J&u1lby!^_O;xbogR?tLap$^B11^6O@%>#} zYxZ7O^9nHNv{|5-z$d`TywZbl!8&Kx>`zs}zdYF?j&(uGjGA8WJ(s6}%tp@YPq{Or z*`CFunAub_*+K2DZYil;3>AN! zNuv94yhNo~_R@(z2}cpQ#42a+U^>;r|qI=>?6(9&e7u!%WvszTi27(q<^)4%Eo2u5l}ofC|<2d@PO(Yxt| zC;nP2Xpg_M+>k(Kzg?CSEF4{NPW69o>M#ylIH6&!P4h+yNX6_$b0k21`rtob`MqAE zYZ4Ngs=a!!xMYexYg@4XtR<4}vl9YAsRwWaEv~_IVoRt5=^|4`LF~rcIZ7aVe;&y~ z^Bu5AUVE|5BMB1xp%=rXf^>l=2{dV>f(cuZnQ%WZ!HAPs$j#Cc1p2)QPH2C>zH3w* zRDzaEDh+m13T3eVn+82QqPeot0@AQ*w$>l!;h@mqTem2K)@N%S(9P+77US%9A-!L9 zMeN+N(fs%H*ek;*6<<0wTJy>p1hFLQ*9B(7+OKz)_Yp+T;ad6qI!=DXgdnDWro=OK zUkm%u=quoFlJXjiq-inPk2il2I}g!0KXW{xAYvzu&t>*7+H`F23;BIWc^4bS63>NW%4$e<9k}OOG&F?wdb^e zM)NpHTg*0a8o}C*5+90HnOk#Gem@A@wTw{J$(qS}%pDqZmDp6gIvjt43jtSjG=L|l zB@K?M3Etq&r9Ge3h>n>jMS9s^S{n2>pU-=Eie{Tqv_csk%1VfP3JcpJq^vT|cv;E< zk#Mw0T`5dV^Xnkoqbj$$FZ0jKLHac`i^mQO`}41Uq)B|y1)nKETjFj>oFr9%JHhE> zqYj_Eo?yyLf;yQQ{{(-FcMje(3S8*4MJ=arDO9RFc}6Qhug{#s+{i9B$FTRobB$;` zot)>d*-+bxfiRTjqcwe|A3m zeqEA#54Ed7IPJ@RE9p{;D~NJkY#i6`>hcNRU2E>n4&0s`EB7*4;fHJ&^9QAVT}_Q_ zWz7ESEhzgJTOV77ZXy8c06^rCUY?;xWn^%X%VJ12%QF|wUvIgv@$>T>6bERnYXy@J zi0a%!0UneqHYtBFu27s!ZFSz>*TqV|!HtoXF~05S41;gs8pjzjCoC8pfcM<}9^Q`V z-D78SV|1}CxXZV7=&t;Q5!Y5pF?Yde5%pi6ZAowJ zjbAexG(`fCTr^?w6}cKSOMi;F78*F5=5+KTrB=jy`I!4om?!C&x%{ku%WhRoB1ZXc z*;@5YGRZFkQizFs0n5TGL~SJ*!_-b5nvnD$3K9Lf#uFf>)LKMLz#Fe_{2?8k#^KvN zc`}2rym)`oq@y5=Bs}_kW)r_Rq$^7uU&W}Wk6uC4>AxYNHPRd*DxlwHu4jU%FH{04 zQcuM>gJBTj$1}AS5%;D6+op)fet2W3ofPuzRV@> zMHGJ*vUOt5==YEZ=-Cg^-G`JXvdNonYSFAB(LqUs9ho0p0$~vhEIYPS47KUGr*WY8 zx;%A*>hVeFlTSb%n1wQ5-XB7e`b)Ed^?A2UMg$R zOxY8pk8q}`g6=xSNml9|l6r8tVE$`)(QRuUlV!*@r<d6WbXsPn70K8^2M~Q8*+w z%j1E%2E%bV+EYik-KxeTuWk^ibwoikN%7Ab{1XPEH#xqKAMB09&9I^~WXWidy5oOQ z=~tm}A=cXQmlNiFN(wf}jaa$lHd>$pyx;QzFvC!LRd{%i`%u2*`O;TrrN?C8o1J8} z-{lZ9o{=PU3@B|jh)~M~_epAo{5k5vFZ6qX3raZ04~3M?ycqKvlMvnNdnWBMb zGCtgPgJF5(WyOT4NouiC5`m%-CVc2CY`2LEhKj@!pa>8JCBeNZ$iBX1Ve@}hX&qw@ zCZcc^!&l)PX(1*5Jy$iG5ZeF+=iHqx09$gP*9$Za}C=`2#{>i^xr?1Z6rCvN7{M~<&I=3>#bmztD 
zOooPvDQ5u+vF$1Qv5H?pj0guFW~z2+mdXF2=&X?>24EogAO~g&&CIyY%*^c9U)?LD zl4iO;7?bCM#vT||$jpHj`iEN@z@35o{WA1RWeRBGA}QBuo?=1UQcr_(#?S`M1Rq2} zCU@H*e7n!Kl18*r!Sa7cJlYnvFP|YJ+bs=%{jQ5B8i*oR@dfiUNJ+^+{UGRzj!dYq zO-bWIQPP3XLUQA2+2Gf)X>cd))v=yzJ~r)~iE(meUdHVOdHqcm<_#p#|{D@b7H8ZQ~Dyu0ej$Q|^Q5iq;zkn6{l zXM9N2di#^hlHY&G69|FjTH~#$3|-D3;FXMbd0_qbBvPK|p#=~g2;93}YcTbrVd@d` zQC$nN3|UJbsp;2J1nVZq4BwaPl$=dEVznDHzN}Vx{_}@$zkp?mDwY04gWerT^aFt} zF9D~#4&Rzt@GAuySL1BDi8|#$>LeGCA~IUr!8QDs+7?b|&DTjfGI%0{zdsX>Oo6Oa z{O^kr>?Ty7M<~3K8@<@p6cpKVxQxmh(#dv45nQ%fr7|dpY1tX^O_QMh$OBL*ntE%e zXMC{S+xg3XV0sp$Y)ls%{#{)I{&M-@<84^+$D1&dALbHixXOAkW7w7$!Cy)< zh_h~MDzfY0N8Y6tc!vI9aO7)&05wv@$L1Zc_jYFZlQnXKVyduSMJvLnqcpTyfaG>eT7-T$pND{T z0r~oWi)oCPYPCqB@9h<@Rb&jJQL4aLafJzs_D>i=NT&nj6?H@zN^_+-owBkTp>fSB z&}nvenM3Yi;1@4`HhoVt?!Nwo zXpPWe_Qnbc)N;qGK5}w>JmcB8^WUZZDskk0HJSD%Vo#h}JqBWI$dvtI=$DWIL&0VS z5q&;pZ-f^a!xl903n%=@(l|Fq(t5ul-8n9soyFWXdhr_j93v7KN1Z(oQOIHn1L~-< z0_x^HC~}dH8eC0N%(UY^N!Al4a(D#WyEFGu$;8t2Rod3sVT50i4=O3c+PXW8r}aja-$EVQcj91cvMCsf7pcbsZKWg3{m@`&KV*j|dYC4~UbYMV2o{HA%4 zLRZV2Vsoj?iPvwbuIpNcu59&_K!rM3hwsJo0{Z%g@eR?;AKGP<$Kx0gF0%t(`c~N8 zb$PDc(f0(Fa8*7VOG@Cab;`dTV(m)6Y~-O7coo88ku3G?+$L5e2t?N)KlC;pQBS;{bgPowBbA}AD0!9ofrfn`^=WL_|ylLEY z2i&Gg#6;QbhbZN~T%u|>wHw-pgY2PPdZLng$8QDVmwT~*|0xA1@^{{e$74T=WzkT< ze{7q4hsWtH@B;Xv(z2{7!$iSUMFS*-KzAU^hX-O#2-P}ji~IWIz-#D-#8MFLry23)vh~#;rv>05my`7 zT{eq2+3`Y932ZJzRU!70o@h?RIi+TF5!~bL2tfRzielK9|DLsEtAQr8up7JMA>)*> zV_AnJy@3SJb+VUr=r3O86JaNe-b6}InXSRsL2J-lLa!OR_r|==fl<*5Is^C|G$#B2 zFgqrHN-Q~agqNbK6H<+Tldo*BU#t;l;%5c%B*|jAEz66%V)JBv1$QL&?MXg3M9B+> zraEYE(pjlBn^I1kE?%YGJ;dQ848~TXo43>cZ5w;L^9qz1wzfqhPZN_f_X5=yQb%33 z_=UZA>)A19rIYd{m;5lQ{py~X?B1&_9di&vb^_0=puEL;g=ndN9|PDNe<}4`jKmL5 zCI2x~;Vj!?M$eVHVO##DHv+%+v8Za2IvJBmiM5gz&ZQhj?ps!`lkAj_4TM8um#Xvu zW=;kVLpMlIfKzGJNJetuyS0wuyk~G21+OgRCe`- zb(M_9MPM?|%N-hj$a=5(oXVykeryNPRyUf;Op6;P&5&zXn7|_9SV7dfgTUS({=w9| zYe#CZkLF8#fY`)!3w9Cm0L+z^8cv(M<2(U27^CCJsEc=Hdck^vR;*Z>qS$@|Ce z2tF}wd~gH&(ZhU=4hs#V3_2x9$ZhwfM=cZ|v_ez(6T$L-19GfN9Q-mH|}2 zejB}g<$6-_H)_UD=pMYUMGO7469E0Bv?ha)9?2&Fdh2!?0@r9EKg_zG8o|pyjpILw zA%Us!Az(GiZ>UZ7kX3|3WDy8RTEeVXTxZpAH@@$7{HDn7s|N%l3p2iNtCh0feptB3 zpeiGgs2_EIHKJB_0il@B%t-M&F4;zlyc~zOi7dj?=^IW0s`XI zra>yYdg}J30YtT5zHY0(AC@0>%|dA7~|9sPf&sHlI0gNH8Dz;&Nq|7)<(>VIhWzi!)#Y5LT$#cKTQSjmQ zOVagb;TD$F~bmmis*=0{<9^n=+m+MoWAS1_-!nYh4Zr?xSEIu=}PB6z4#>q zwi$4!FqiROMW&s;X@V=FV}(Sg`&bkABeH1lx+yrklNT9fIxb-yZCjoS2?Q4No<#hA zpgvjkbjI>RjY}gS8NAz%CyF`qopa%BIbQ{qm#g4Af#1L!u%ZE(aa-=tST5TN;Nf&L=Q4%7sDuj9KU0q%LX9Yx>6~o7Claa!BCq5SNYUnfmd~; z^)kmT8RB|y-8e{f6IJ;jVE0sqxdP;CHfZe3yPVlVM%ZWfi_(VBh5?*^Mfttyi)32% zO{ulfM=*-O$k}&ZJ{oDJW4GjW%nJQT;+*z=GgqY2i2EFf>D&5?HJ)TXsJqU8qX-63 zt|?oa;=*a;PL~7ajT^wxyM_HIMz%L)88`fRHF=WMLG44NP6=KA=nY?Qy?E}USuAGH zko0kB@mzoM^-tw`Oy+znHg?%Ly`&*PdGxm zW)JPVSa3!yu;9=AspLbId3q1?lr=^I{$ft!rY6eD53xUjflliI3M7hu^rC{>C*Auz zcY%8B%~=d?*7XMR1lYML-!4~w$&GFOcsMzk?-hFvT=p0s*Akxy$M#_1Umrj5+ZFO? 
zt+<=o_e_o3hvkCirc@bi-%A^wEBrvCkTsB+ciZdg)rK17n>hfgr*9%aq(MzGqZy zeF;sBt-^0Wp(}Vqi*b+*g-0w#e*@)C<#g^gqUmUYe_EwbOmJr>x}dW5sAVi|2lbqR z$Li6b%kYV>8ZklWLPLe8tR&^6I9jXJ9&oNtTj@Dma8s`9Cm*1Hbv;gvE>s>w@N5W8 zaoAIb!YlFBoZ;L9rKSk(bPY>$vpbMYTEiGO0%L zIdC9dkTqQgdKw0QAO~pDvsVB|=W;LM_x{{3*=E@S)s$1l8_+W5Hn^(U5mJ23+X7I} z{t{|EJGZvf9#3owRhYu6yAFn*gN|Xt-!AIL<{RVdAJJi%yu{hm!_ucHF#p-lcxDtj zrTpWCw;(8gT}p07X+Yy6u06`fBFUgemVpaN_Oft)A#~B>impo9*7@cxOJ#=v zyEqtGXFX1WTgkI2LTGwou-UcwX~YSM+oQG?V6qO*u8Ow&9ykg8L){ThidmASBh`(I zeCi916)p}r*vE0NEJP%B6^FNO>&#!z-FnwBM0Q?~8<1^tak{OgG{<}L|L^RCc_Pk`V){(UN?g;}wB$Eh)~2pgh%9rLyk zNYaK&nsgZjiB9W%V{d}Q!KK)&oBc!YHmjg&sU1QEFa%s8D+wzJG;mdMqYrsMp%p)W ze5j4+H<$rr6XlNAiPJx0eqp&RNNxo$!13-37l-qu-Xq``J9GPG&ClqVa-#Q$<2Z* z&sAE6Y1K~Ikarf(yif^IFy1P8*@beYu$p{-vw`(nY5wH}E$yzQ4LTSD_mZe?taH1q%xSC0TWj)`v2ZSd)nI8T*xI#oNe9 zn*GM0@FJ^U`xUCTkzLbxx@t&+0YmME7mEbjrK8$eIi1;-9_(ia(&s3#H3U8dNTln3 zbLc_<*)`L<`&=i(Q1QL}_Eg`PSRES}TGvn{!gWM%&c!M}W>Rlk#9GgV_B|Qhc}K!T zbni@oy;+=Akaarkd)a_sQ7A?JCag)x_tRcTYgQ(pR|-HEMYwijF(WO6o=>5pU{LSV zc)PO?6x(lu(u4}X*XNbmfUrjc+>+UU0{;x?N|iM{jL!LBehK;=HYYS>V&38A!;v}~ zv%PXJaC0k{WdK$j-T5&~q0}}x=!hX=$vGzIywKDK>qf5Ix1V2s1k&S- z0Ckf+v`wMXPrHtk8R)UDG zG6KK$vu;u7(zNll&ML+JW=mqs*XcW)Vm`oyYZ02?ivK<}zgUa?F>V60@j8qks=jJ- zQ{}5@2`Sip$DDIh!CjPKPUlsB;L%Wnks%LJO5Up8+92c#Nh{38_}E56jbd^r?jnv| zIRNm$TVsxR4kF{Vyf8j@sgxDwE9aqUl;-NCnfl<&@M*?!KyRBAYD}%fXB8|&hgFPS zoY~XpnA|I$4_TFQY&;UGdCXDT&@p)jU))58=`JqT=W~8h6PA}U|eEWQOUi_ zxBw)C&9axZB|J7|@VD%E;@n7msBHO%h>0P8h! zF9Hn{%rV;!c4BBBl>7}WuZ^>=I8tNj5`Fda!5#ba*Tvl&gP!&r88Ew z@fhYysY*t$KsMoKczDBq9hu~25e(Oqu)~vc0`(x~$c>_BZRP<{U+lcx_Aw9+-pM)< zD3bs;RKK^SmA^1`>Kq!CA5f%Uh79Lzkein0n)2PXB{N${IOl%PX>O~`_$)E$JG}^G z8bihn_mHx8>vnccr3WKY3t6vdv)6d1+?qy~|2a>+Wnj+!>L_=AzE8J3<+N}Lo}`D4 zb0;o5g|`*n6!(fB5R7AmuHD-gO=kOdgBJ~?9hYxU8n+gwjI)M*qo}f&0$;zDC2^Y+ zJaKoXsP0Jh`vp*h1>^4bf3Z#Ucl;k+HRz8N(f4Cl3syuNpv#L4VY`vIVUQuQ^rRw%-DiDYy( z&+7&C_#_456%NPZ@`MDOuj&jU$$D0Tt}VsooZyBJg)m)aXtU)-Td#xP21vgXfM9Gd z<9e?dMcus0S^5P>Z*@ao#yvAjKX(54ecY7!X)Zb-s5A?I$MvV?|E%UE)zEhc*X>?Dc2@Zz=A6 z-PL1*Ah(Qv3MDt=5vQQmz+1Eh@e<2}M97;6UlI%57#=)OPmxTzdel$JodTk3i9Fuj zS|g4eP(00?qrl#P)LvovS&4BF3Ngav!MhWU{Q7bgdMM${sOb>5$>Oe3L;6 z5YNhr_*<}ZLEcKH1i_|P)31F9nDWvG`7xzx{jg_2F~ezn(>5p7_y zU*pO6m~7V(h7}yX1F))4J|(!7wGROoBPC0Ja$fO_Uvc)US$26VYiqLjdaeAjMM@(# z3kzAB2iM;=k_)Pb4?&Ywh+=G=r*_eNy2&X@3MS!LMkk3?KWvOVlw6~5v+-3yLwe(Y zb#Ud}93P9a5D%Hpl+ruTLRvXQCZ2DLlHQx&l^;>A$lZ#aT3m>)_q$$AeH#A6SNdyz z7DH2+m=(V!a52w+ zGq&5uXga4PYls-*8LYv|h5=!%p>kWgZr(Xkugu(6)*r1rFa__3Z#a%fUvP_A16)$+ z-}!3haG%ia6!!o8P-j&lfK2**n_HiM!S5P5DAtpnP=C@QzC`L?Ur%1JoK)$N0b#U- zkc*9efpsmJU%HZS2~@Qr5I$=hPUMf-@pN)72N3SJp>F+&XwkvY6YcMzM+(}LXQ+{Q zu=Zp@46q!U#(oRW&$U8KX98~}j(wx_Gl)?!R0g{MCka&=vpNu%>#lT)HcB&phsA;h z^;3eU=0CNBeVS!)Il}6m^Tni|haeOS(d8Kyk5iG)^7=ual0oi@1fXopKeY3c@rwPA zySq4JT^ZQ+>6x~#2f4~FE$FB)fAm>L2PR4;uT=rzf)2$?c5xvuTwN7BR`Q~bzdB=cqf`t3AVM*2*gQFjr=Apl(#)3Var{^`W87$IrX1_K2L^RnJsxHQ2-b$t*iaQ# zrgv|Furl}7UM3kKt6ba<+rOqHXbTGRDMYH(UC}=9>+hBy~5w_@D%tqFUmeZ%g4!%2Hn&m0fg1Z9DkGTDKf?3(HZ!FBXedD z*rIzd!IvS*`?%%{yqU-42S7d65aA17I{)J{A+gMRrLSnAYfmW+{F;#1#D}OC!2p!R zo$iUfqgce7D+EZBYdH;n@P-n_hqB*<;*sv1y3HtlJDVSNbk9#@I7MYx{43^^rDXO{ zVTvLNJ7YJK=NtgQr!hVrlO7e+pmalE*Pq0pFjYTi=#nmHtho7lpduh;WFs)#W=1@v zRa}AflOIA}$?gsyr1`=La}oN1xr0K=YoKy`NPp%t(C}~S>a!DnSxLT3zO`u$JmROR zj!z*^E{WysvJi2%7@XWEQ56>=pg}|&LHddc+U6-MMn4b-#aW^yi6=z#+<>SF=nus> zD4NLz`z6_yB%?~C2*}8u`m~f1j6&F{Oad>=2-U(dK+W8vOz96zq=-m_+>??NbhTxYqb%~V*05Ti83t6 zXn^mt;tDqdyqRYTz+5$IBIMecpEMe;fwk8!H;K1Hhfo`T6Kc`CjB34mu5+-%W&xa< zyh6#!gEtbQZEC-m;3wOtsglGCqVRS(&pfPUQq>Y3u7qvc>F=*P6j%r+L3JIDLFlME 
zC7W=H8>p-8A;_aa*ZImg5G@W1^rwpAkn;&gEUM-d^EGR)bKM6o*t-S%1(QsEONW6QN^t46D~`}l z+9@UP^$KP46NUnDJ*V5pjQ)O@7&g0z&Q_Vs{Mn^abHc%LHY;>2suoA| zgL{?|{VS$WbI3nYpz*^!$Mn>-&w>}J3z zo11K)?p<^C1BHnJIkTl7Kq>BN)N`sWC3v8Jm%ARZh|Vf<@{miG&n>dc)QxD-!Awkc z1ki0UW)Pd}3>Zr3S8^zL=50Rej?PThzjcX!{6R&_J^k8PRqragEyw9JH{Uepny8h~ zN0jgmzwg?nT!n369i(fIL4yy=@7F-guO%b=>Ukr`8HjE^!rF z#$3hfT~JQE2N3weuo+MP;#$^~AbD7GBfbQQN&kWSoPBefDh`^kX}~8^&aQpktwyAg*taSKgw}03 zrJ67+?eukJ!_XDK0JFNsjxaTU!AS!ZH@Sk0rTxT2P=UhXb6PR=qy_H3p~CBXtF0Ed zH8G|_8rLB2XME1nlo?|y2~RZXkwl@}J5F`J3vtSuYLUG|fLo$f3Gk+Lxp?5T_kqX< zI!Ps2-eaD7-zBu|lT$y+L*(j=a=c%N# zD1)Ib+}CM!PM;0mA&96E8Hrp@<}iToiZ1emIjk}zC!_jtindfva|I|R`meS$VzZ*+ z?blR`4Fdkaiq<}It*@h@fc7IdYEj%4)c53A*ObJg5j?jk&KF9+zKzm-ry3Fau=H^v z;B=cTc-W=j$;~wj$IPU$9}I95dhR;^?kXJO1{A%0q6VSn^Y3d zF~p--rpDA)P$~DO^8v#X%_S11>=+{!)*+zHYDw zn_JlOkei=Z5o~f+SgnoWEtW-D;)6c&n^A`mwXm@6TG-NmymhePw|^fyodBsaO*-LI z^MyhZw3*C^YEl&05S}ioL{^Uf?-pBs%7~2NomF(7hF5&{3#qzLna4H!DhU{j?c_x~ z7?6AP67^RRF^U!#e|fz&A&=M!rg(XRO#kN-f)TWi~$a{mMQLrZ<$ab1xq$G+wLk;g{?-S-WW`zgS(gqzP048 z3lRbb=!*BXjkEymjl5_y)RfK|B5pcQW)Qv;ADRuQKx6e%*;Pi~T*w#r*LLMA^NYlU zS|>-b7?zNEdaD&jc@?)HwZ?nJh^lh!l5$+X3Ziv?ZVm%dT}_FsX_uEgC9=7(7bU*? z{JR+|j0d_t^(ptk563?z-GcEspyf_5;Y8uf&@e@QF~hlVKOu;UwPAI(x^JoXsMh zzEgwPD3bE}$3d-nBm))o8+has?=~HnJg4yu!XNL4W6uy?d*j{J1RQ zoeSNa@cR=!Z1YQ+3;r}@28<*26zoInGma>*9}{5ExFdrF$bzu^EY=98i*@lkTI@WF zE{!k+&kR@rS5u?_+y+c2!{wXRO9-0%WKq~DWB!xD^<&D6p#_t3mo6w_!@WN4(8quJCHg*-- z+aM&NKc#^{O2n;PZ8(fxRQi)~#O9v`!6~wp@g9E8v*U^_>p-L+K0YZDP+CrlT8Id; zpw$)U>ma_~JcvCIK|IKqR1Zy#eWM;>Rmm}vctVpyJp-}sdnNt=W2z^jg z3lhP9GjpW+ZZbZMiWpfLnLse5HzC|=`#+JsC%O6LjV zGSK|?dCQ=Uf`eX{B9yOk!u22^HzO8&`aG`M!o0zXy2lr+-Hy<4&WjX*@bCrUMqQkT z3UXYB*PqL}|KwT3BQ_gLc2j%BqRxjs$?^Ws*FcAc!Q)zqb|#{_pE1NPuaaB{x?kO} z-$7Yhc_`R{9dk7Z(QkNv6g>^X8a}508$|lCo@&aHbjz9w3sBV)6w!893)nac6a`vw zKI2V=f?1VZ+S`+qlV6x5pftyAVE}3-EVl|^#C&rrj_@HB^vsBewSVfb5=9t>HjtH( z`w}7$&yDv zH=zXA2upRU{Z1LtD2+6^yEKvhheU@a)PR!E~(K~G(}Fw&}QqCd$YWNK9yVWjnXXWPeC8dXET-*5bqA1>oUL1Ev? 
zSd6H@9_A_qna@I-_t46X2BqTLdaQBTM2H_GeV;nWAizC z6hf*Rb>w8NDI+M9ydtX{gf<4^|M&36)Qt>f>cTIsTKh$aM^(ia0pz4Ids+~$bnO}N z0#I4>NTg!lMt3KLAmDwvn5?^l-L)y7^PRx`h`MG-{YD8!5<3bHEP^(FIJKG9d@&;Z zLMBJ?QURfV7B)()0MU+_MbHP%x#(5}`N0_v2Bt#8<;=odpJ;+VMUe~~tW)1DI;uFO z#u?2f{@_wT1;bM4^UE5V0I1#vu%e}@<21i=KlaDIq{h--$?HauRYRcJvw`J`Wkcp4Sm!0bCY!0XtzLRK zhPXeTVfofjN+f2po;40J;9WzII|f{kre4xQiJ_ot)di!2NPo5R#(lR&W7wU|FDSyM zF1vP}38@vy5C+_O(7uA^Z9S8o9iytdj70P zLkbR}C*`Gn87^A@B7ZEVE$xrhW2@k>q%SriWMJ-h)<+)_m<8m34eOS#2EvV2vm_aY zwT}W2iaK)5!N3+UR#8bDX*<**ekrYL(Qoq6cA*xy?y-ALzF}M8S+o$0adp;;QjNa z5kW(o&~zmW>HN4hbsBLNM%xlh^RIZ>+IZ)SN?)^Ane`3C0ZA1;z3rJKtXAbfwgf(Z zU&h6MufMpbvppQa3nY<>CJs>Zd;!e+f$tix!Ni>k0tX^_<8!`d@K-HPQ&PZTwo z*0ZhZhxR?q+Dm`m%OiT3lfEjCH1eN_{a~Z7db{u1O!1c_a%dVJw?%17TTS)8(r= z|68Pcy(qnr`<|!r-`4$9gBwK+h^5}^`-78bC%|C8NcnU0Iv*YH^+aWt5gnh>fmYiI z76ML|2wk4OreT9jsGId-W_NiRUD}(h6wx~>^ZR_55M_^;+I}8VV2vCOFa~jd9NXp; z-R~OJHF)^86Wd_3Z8gekDs69IkuZW~*G=sY-u55*|9J(1kVWF|KD;g>0}-P5nU%ba z%`F=xo)zR8O5~}?!xr0WPI;if>C+71b~7G!hY{)xyw~|vPxD>_EKV~i;*cRlu{f1+ ztCq=p@OFLeAs993^5CCj7O_8o&hxv(>uqD& z^~%zBfDSmc)NFAOD!2kW$xY=c`{L1+#oeCr0XpC`7b}I3ET5&cppX}ThFiNG;njeh zy5(K=9*qj!Qh!HEMbI^$RL2lbUp<*omqgBsjxJl<;m+Dg+1s>42waTM{UfV9S@5YHk-vo(sUSgn8>>t~EV z@?4!Ulv4joV4(+mHMU=W(NZvTK=%^a6c*rkYIG_oxJ0&rHz2TnQQfEMQkyRwkdLit z(e6x)L{TS`KAdP0zgiZoVmd!>`Oz*p-|7yytUOsRT_k2U8q)?cD9LF%({erl#jHzD zO2wh=n0#`(F*nX-D&MW)YQWtyQyd8lLm>(rlKm0}m99A&i`A<~U~Xvn)9*X<(uofXu$Z16%8aat`Ipl`smDuMiF?McMtkBr2OXvet<&9K*ixG2igJ40xG0pV$`ZvA7J3+ydAX#oM@= z+vr>p4US8^wpKZR1z;4X8J4V2$&xd9?|dJnIwezVa4Kd)v31buR3}RnNR;9{#U<*G zUn+yB^RFmK;Snzt{3T{}4-ILn&D+vnc1o*B%FreSXkvlVZcA?zW(bym4guhw z4(a$qmzGF@g{2u?3D0nHW0K8WF`N%HKf7G))ZB*`j}SzEkP(yR=h>-WUwRWyjaX3I z*QXM$3OsWV?LrtBcBqxH%Lo;0Oy`9ip3`Aac6*9*y zfjWJE=%w(d#Q7vDzETcm5Pmo^2f-s}Aer*Q*1F3?;crefLH2!7nLjUK>m(rLe{7w% zu7*msgdd210PjM)_nr*z`SI@QNBbPzJIS;#f~b=gtg6-Zc^{_)8=8~S4};~mh!`37 z*l8f0-tHN)Kln{>?{BIsu`BqC^EV8#Jm5)+fj8Zo=@}84oG`ZIA3&VX_j~tzVT25l z@kZ11T`zl#Hbs>4hqHStNmiF1%-Bn6f!mskAErrvl57jGR{K+$0vg0eLA_r;$9@W z#GF*ST6l%mxK-WfXre9~OiJfvUSts1=1NtB{3OFM2eiz(-o7Hy$yRx%qF}bk=KZQ# zqh_#wtS2wF_PexM?)h5Kdk$t>t$Eo;H_ziDeJ0aSo$zGjZiREDA61V~FB}IlP8t%r zCo?_hAoeNLXi|DL+*r=45h4H8sIyaPD>@3ia5cI4?pjO0;(Rr=!MdoB3oS{IPXx}; zJKgW1z_IDXG77jBt~k)9O44a4A=V`Br_p8i9q)CDc>*(SsewH${-`+JY$s47E<#ELS< zB|ovKsJeSDg1vDzc`B#n;V$ zUDu4?9un^XDVp^&6V))~@s$mv|INQqb-UEbWZ;fmnDJoCP3hF8NiKX6-UDJB``O-k5)qgRq-S z`RGKzJ#HHvSbQz%4eI-osa0&WlCx{P%Has>>0K_W-4zr*J_iDsb5vHvL zwkrU>gD5T-HyAGIS@-c8T{EnGzE$DA!=8gW`w=w<$@4dbI+}7dT3Btig$J6$g@o`%w91BSheE;~Kp!P)ZR+ z?UBzA>Gm`E6S1)Vrgv}hGwfuu)}qw4jkq}1%#fpMZ-f5F*kGcZ)x&LP2*O>f)<4_{&F9?Nwjm4)y z?D%?I=w_ki`8yK>qe#%1e(-6Iw@LG-a{p8tHI#1oa>B?V+cx;0uhhLQlkuGMrj`^Qv@*;M;VR=m?M1CDoqNF_sTPjqY zNJ>&&tFMxI`*k!y4GNkSU!zE9PxdZn>rtdvVCQB2aB6G~poGeQ8P1s%W4J_?Z%S7z zF`j47LVo1EFO!6<+WLXxjF$P8Ei4l2@OV05Y3-1$48Q#3@fCvxRQ|OvhJP*sqWTEE za8f%PcAN+h={+uGwmqbQs>DIif=z$}R8TxxX?JK&&~)0&9FOr zNMlS8tRm=g%=5JR7I3FCqwZvtH#SEXl0K$dmO?J`l8eO(C>l*4wH-8yVqxL4fjobJ z47_g`zd&;#sw0u~o14F!_6qHZHl2ZFWIuT5Am{hXQw;rop~_-Uk%F~5rB6xET}{yj zsN68QUNtrJ`Yf@rt+7bhu$8^oQ7PydQFqoIA&2>|Ff{cP?HnRe&u{xbpv>=b8#PKN zM?le8=ixx;;%z3%XW)~cpO{ks@z+oz`O47%p zVxueT$j!fhkPGydf7x!&GoZ>CV#+bnNYg{n1e_lv4r{l4oH%*RD?q@+bvd&{m2(1v zJ%HV(Kb_j8p31^7QJg_FJxwjn^Po7_{8|9M5S1yyCs4&o632bcadA{ruxwGc)>?|) z)q5k~8}PqFJn zUw?gZMoaGavbfkTw?{R&`l^XSYvc9)6tUBJ@4pDXsc>6MjnFu9=hN%ZH5B7Aj-C0y3@C)JEBl!22ZSWB*KZ0f z9xmAQKEE~@{|u^R+D775OT*o5JYx>qWa zj^jxNHk(Q%^(p}#qyqq#xNr<_wcdNF4~S`#2pJ5r&$WuQwGiyooEG}a>NH$q6`7WM z9Q55fO5*L4lCeb+Px@MmIW`h0g>!3Ej#C=eI zsp~D0`zehQ)H@IN0I**IKd{bVH2X@a0~pAsJu&6v#+^iqQEFMRIg}LC3Jd)8R)cae 
zz6>9({yOC6-=b`($>2aMXAIJrQa)}p=0%-p!dWctGcO*b*AFTzYey9+{;_1F5U-8U zXklV)nXAGP`Y zobAR;Ycw!OzxA#Z12`mj@Ux*NSn?sQ*{pP5Q!D$$ZVnL&C^&s(SDso_c|o9m_caX< zlE{yJ?H2U0qkO%eet__5a-n*113yT8&Ktfntiq!dY|q`tQ&I&UUF0V`_1rOttbpWK z*mdoU?F)r$O&2AC1eZ~S+3SEFcE7J>t3>Q5MV^5f8CUT)`)BIqhX_Z_Nuv+xfFLax z7U1OKkIi=4=2;D{J*X7U9POxoe6JBYw)h=SNMKRNh9;YMP1>_n=_%e8GUOG@-qT`#R5u*5ts)_` z6A1PK#nzz+uQB&&@3uXM3uwy~YC;85WnH~ucDFU?Z4Qf4z0lN5=Ht!Pv?+}|!ACHq z^`p^*(F&>Luy+=aou(#Do-07eY$PVJMKd?#NzVy1{nM`)~#66h<$9kXsFrzN`DL!b7de!nA@*ASd_VB_9m_WN&p^*gVJ|z^DKh zx7%}O1wn@qFa|{`zh9kG)B7Td$N<^3CB+Is-Fo(`>UE7)C~o7|zbGnrD2oab-eUz{ zlM;F0Vj+6h41*L(2q`QB~y{n}-@mBvqQ(zdYqbS4;ovzkfH>&w6-pm)0l zxLg!tet6$~CDyyP6A4tz{Q{_}H70>`g-#hqHR}{|$sLg6Mf{+Sm0yV%^8w_}$}uY! z&h8t!y)Q>yu`Nq7Dvdgxwy_M!xwN@LwRvkN$^9+EYnU8KTP1xCy-2@ZC_d7vfN--3 zP^&@koorBO+qSZQConbsc$6^jY3DLq+-dh0(8W{r#t6f3KdX=!v zA8(E2g+ecdE`Kw8cR#jm#}@}Zo^6a2IiGQb5IChRP{~t&dWS=ms|wtT)&c*1RdwUN z7|C3@N9RCP@d(z03{s0(cB{MTLIDUHxh!+HD=6vv1(AjkQJ(9Um=*x%Nv8Rme0ucd{8l@Ej^!Pj9K}@ox%~uO9=^FEt$x?i zT(%}I&C4>lQN*8K`Gc7HTkAHf4|#b7j>4X;o@i>Ac1kt(=1! zH~{X*D0OS}vl>_v&I~E_@7`;+>SjZX5BjcIT^$9~ixE~|0oCdSW2V(b>nlmaC)nP1 zykH}Lo*=|w!qj1$-(=fzFPDx}8#~AsrUJMcIuDwuXiv{}#ZW4`g7!oIX(Tx@}?Go#o9S0D`&(kA}2t8J?b;Luq{jMhST`E+ZsIu;=J}dL&a~ zB^dV9mM)+twoQ7UFI$GrE6$vr89x7xk&2FgG8p1ZP7oR#)rwnpxqJ3v>?UNPV#)aE zcmjs2@{zVxO9X^iJ-L6{K882B9|xs*e}-^~SM-@>m;eGDi2NcR=-wi~?&S%}_`aFd$d@@uS95rU&ZGz6=sewF zW|{zHd6A<+qOM+pYREgmNey~1Jn@DrwK3vHHokuVH!z+T)R z%GV=%Y4co?(E1#B649k_2FV28Ybh#!Wv!L59j14109xzu!}5p8(^c6h({)ir-kZ#^ z54}yW()Q;}ylb7I=LVin3SyD%KJlHGFr;0c%72YS(~^kEZvdkok#4)xLRn`BPlj24O@4=W zuiA+kuN3}DhCucB1Sp_@;a~Do(OM9&QtJpQDZ3Yv%}dfx@S?3ydRI4R-Ylh!9Ym~M z>KLpXPbz#9bdIS5rm5PHf0L z(TF?Gt)Bl;ITe3b8m_j3=Wi02Ntp?P+q99HE04t+C3>?F;k=WK`<9etFnf&DNCFK` zuCq`@U)%`>0?_I6Ky_t_Pjez)Y#y4_7TA7U6UnsVmU(-HWWP+I0{Pc}8_6K^B2GoQ zcUa|Mf1>gNT&-l|GKBgsu>?NWfWYz$5`qmR6HJxNsbU#OyED6XJaE}mBVGINY6g`HetDebesiXAP0O^L3KjzAW(7w#P zJ>{YV2>bW$X^Rbi9mV{AI(J1r;%p7=IaxX%E;tM@y|Jjn%umbchU1=5MTw6!BQKsC z&|eww6lCUs;#?kU$q=j3LV9jz$9i{JVRL%np@z8a@n3Yc4DSLjBNJqO0_z~(U|noy zK#o^K$$?!>7b(h0Hc%x4*>ciDm`fK1%1`pps5!bZ>?5_5V-wwf65@eYCFE=}q!HUJ zCyT0%G~_t7u1dQoTE{F|MXJYau@isHy%wh{1v@1mWpU(whv6?@Rh4;|&CK`A^^ROf zr3nCAK%~E(QFiY)%xOz}K2gtWZDyry3drDoZ|baMDR5m-`EYrKd@RCvPm{489)P@=q1Oel<80`g}DZf#+Tc3@<2& zozpX~V=|j0$PkdhvQwrX?336PyD)PRR+SjVQ4sBWPvB5J3q71JfAi(yoPJXb#-4wQ z^@iBOI*uKq0Fu5_C6lOa+qSQ;A4ox(QRnIp#=6Nu6F+mb!llU&_6Dal=zq6{nzFBARpJjMxP zAp}rj*u|88r!&Vxe}3UzjxxbbRLXX*VJ2H;?3M2;6#?5;Y(LGyKt7OQz9XnR4w_=y zl2stDvh_KRCsMzD9+Y8FKADH@EJrVGTc1>N94nEeLZ%lf)>5u;xj|@W=WGZxjkDp4 z!3uOQ)m5yC6S9EBT5TGgOjPMJ<7|u!QMHw*G*EeJv?eN9fBZ?;3CQ)&CM0ns$*`5| zdzy+Db%gJWrVQ%a3~{z1Oq*^qO1!~;vB|HVwkRBw3`}zZ zOOp6je*s*zwe`557I@hocVTCgmwIhy*?7^YSX|McP%a?J^-IkN zDAD^^ma-Oeu@cXbgqaJy;Y-f6##Qt!jrtZz5ceB_>rmb;+fJ+bXADI}-CPX{rNo(? 
zf8IEJ8jVAzfLS+T8`cz+M0{)$lEbm#Ha(JUM=qhDWn-98U)@yksMTaB2@)}=e($Hn z;Ob5njolh0jaOMHXj1TCFuP*@vY2`i0E?pl9y(#c#YSn}EL_3AA}2S|f8!xkyAfT5 z0F!J1B!_HJOEwN|hRdCall?xY55ctE`d z535n-G%4ZEE{hV6=CB?X>s2@){X4(m8XVfdoX!umIG(&flg!y8A$-&Z+o7o0WbgpT z^$q5YTMIXcGMyV}-KDvMf?KNe_(7;#XPCE>nOWq5;+@#86^Gx>;m~lq`8L%qf5tOM zjdPxwL653g&olAgs98jwd!ea%kSU%Fh=Yxmw7D%_%#69!dF+PZ!J+Nwsdu$qOrUyL zDrsRppKMCo{KuD&XwsCLCIjFpqmm#OQ9fyJ^ClB2Cn3(o@#3h^LZ@iPbgKBQ?F|3aM{@s|$at3E96B;g-qhZu7~Bb^+N6=`8~BAp=7cF61I zC+OE@kBc2;`n_tJ^g;gp(t8-Z21!vB{v<_>ilC`u8i3YfLyE zGr*_yc)R$r@XDM3MlnbcB6~yxa}v=RAL57f$ynN|SnoLk9gj4Y1615SWuH{svqwJY zN{-7&7_1?dh#J+&$u|;SOOrYudu*nx;ADR8Q?NXNj9|dE=rdxee`33Y(I1y67=`|R z+gnnW1gQMvt0tU5Hiw!|qIB;WiD7=N=4#Pe*u4hWmb-5afnQYf)3BZw1Wb}c z7IcOq=#OvM?<+lAe_~N#r(54@a}ivUs>H=MO^7!*7IigopN5U?7x_T>yl~YY+=RYC z{c=oF`1Vt;5-iiS`BDpMflZB36gD&?o&81{H>ZQfyP=hM9e+VK{fG2ZOMqx*C{Sg`m zZg0LT9!NZ?M5Mk~nd>yd29CptTr%PkAl5dyTl8x7L!#?BR?!~Tt@ye$+v#Lhce@1k z$8@viB)Fdn+p$KK#}30u$^POQ;$z_){H0&$sk^!CR|{3ceI?woCGr*@z2ZN$GHDFG zhE)9FChE;=f8aX3-ng9Xk4GYv#iN)UvNjzcs?+N2ghiJ-TD;sxsB|KD*N!G=&0mMp z&R`Q?DA#=03-0aCO}GeB3luc81+n`XXzCfil=vG*a=fG<*EWOs=10NyWXFwPA36~e zK7gLpFUQILM~ffGQIhS1P9pJ`VeqX2^w%KLlSBeCf6_Lcjm!n2H!MeJKg_BhBNdBA zt6R-YVB!a6P$tUq17*MM_=-MZVL}8G1$#$WUIK$3*Rh1QypH!UOA!JWULu?EDAz0! zdN-=&X0)J46h-nzUt&(^uY!@^XU@r^s z4rI#2*Jdq`W&DsCZ#Ae-hnb%V-oAkVkHKT6PVk&@_n<1i+ek z$!JrTKL_&1s)uZ^JX%B%o445$5s>h(J&(gF{8(8dxp6F`8$^c2=rk`#E{2>;A3DKAYu_VPCVV&j|NulG(p* ze<^XCp?QM~a;aRwLGoyV$^{bAQCisZDyHjE;^>{Kl_6^)5|$`EmTsHsGsf#lw2f-0 z^>yHR<24L0O*8gSgJIV?(p%%GbDkP>0NV5X1qzE;E3PED6%M<|lzo)TL~n+n>5WSg zI`O=%XQuQvJfy>!mv(WFT0hD)%!kF6f5#S2R^1iD#|bMB{lh>?NJ3qu-vl)DSNY1$ zr_PACgdz=g+J*1jCs%*KQ0~*b&fn3&BA|DAHSAAWFpK=K`3bggPADB*%yB<;N6u}rUSk(+Rir_NbBYC2gLaxsA&B6Pd$*)Z#Y6)o3VXi_SgCt z26YTRVnv9;G+4y@Q>~TSqb>_E^5yBKT~piYc2?rU6&E`1rl)Xm&Udh5fMfAS^c3lja4%P_Ls$y_S6C= zC|m<7vYa?{&QGMbnj`H7Q~-K)0l8%u@_Ef$)k8SlOH6C zfUk>Zz0(Eohq+b;iThb?l1ah=3a~5kr&UI#0e7VdfUmF{HFXI+^eY&KWf+E)7_McQ zfcbYQf0u+&puc;?H}^kpD|0Ds$ApqwU>KdP5i&gxG`Eil&Hw-0AzF6?rV;O#Ep7n_ zg!wLGW_}sXlVT+^T76}ne@rZYg4Mi)bp1s2C&O)za2>$(SVcMZ?sO{qOI-$M$w30# zeRLX{#YzT+q~@*$$~JEClgHh(Zi$rY-3xCpmgYk9sXV^9dx-8q>sqn%E!D*O&}nop zpw;|RtXRpZhJR~YA-E;F#lUmS&q2 zIgVeldCrKR}_H(5733~KAU!Yd; zjE6Q&Yc?$>GTPM~NkcWcNGggcT+TXBGH2|b-aWP|kVb3z zE0Yo}>nOcTx`z9hQHlUUpB@5Feoy_)Nw-s&2&)Y98??x6p2y+~LNP|4w;r09Ub%tE zvu=n8Ayka9ZnB#KOrObz%%FVHjiJC>B^6{QiS8$we>F#5vQt%G7S$gHwXJg#ST--Z z-9CQYI#5_MJso9J9a4WXjb`@=8-1X^mW?Xk>KCx*loC>l7&4~4sqB6dG{eP*mfISf z0UAh}m(j@$Cn7&cOzI`jP}gOQT;oVA4bkPD?;uIvR+JgQI#Dn3-nbpibFEZ;RustRG3Wfk9O7H)1ibxTk(WgxrH%i>O9-AwMTFr&TJv> zMe3+R_NPigk$XRAHV4-(QgFN_EH0OrUF^!kf8ku|`U$s*^h8ojw{fi6^@c*m+dW%F zFJC?3TeP?wZm5^y1vIe)L4;pm9rTr2DfK<qqbDt9CyT`v~FY$X%;VA&(ViXCP zTesk=^x{chw7vL~^Yhd4GKVs8+YGgu5Q<-M)?sdda|msDxd#-T(*Z}|HUU!lTGjiI zfBQ0b0qa8!oN%q|8I>dfs6cz$rbKxfnqjh66w$l~Ce&R)A%T-ba%iNsgnN$Jkx z$tX?BAX0u5BZ>jT^%BFqb5n?wx%7#oe^Phu$xRxuO*ZHrZ``!N{+^~V+TUaF0?*tI z!}88;kav)v+cP$b#+YUiuuUPgPN99w2I_9tO?vTBU2_|@FB^^t%ZQq>u-DxNqcHp_ z3!d`N{BnchFEw~XhQHAY9uYJ|^vygFFy#OHEki23JWrC$B09=1kR5weOxq0kfAVu9 zVZ~D$;I?-0zVvS6vW=e)5Tp)uUzbQyhqf!*yN~+W)M``QMxD=ZRFid0 zlqgfrNMj@BX%$Zw+QxHO&c_tSEJjltcy`pErBc%NoAPds`zEEqLEQOE|w-^C<*~5Mi33|XHDMz`E&uyWnTcyzs~QD=qtBDp*4lA|ifJWv>*x?|zuf4RPj^`lr?3i+i3 zj~|C~UIk8h=ySZWGx%+?+|S{~>!d_RqJhqo4-@YHV5Ig3y#>E(wqSweq#d#i9^!>` zFs_J8bWr#;ogT_29d*d zV_2TpFKdFxTkux#e@hEb#-i08_PLZn8QCJu5<6s?nTGiryZ56EtB-iwg?7#O^2iJT zoU$@_gk*tU8IZoX`*3TEwx!NaGkHlUlu{|X?C}W#t@OP4d_vsphfqa=#|0!XUX_|+ zfcWdS^RpC_x;%TUk2w-52$tTDE#a_7gT6!)UGdMIB*tUCf9Pv>iD(g9bT3JqJ20Ol z@{?EexPC3z|MsA=v8_{>J_`~~k{!2X842PN8e;BB=hlvMdZ_bdn0ToIAR-McwSgB| 
zg@}Z0QM;^}Yo|O7R%)kX!fOM?2kmN$AnmX_OMcA~ermq4CXb*5mIevgcoeVQY=MBW znzqVyg-St+7Y6pT5bKq~gIuel%7n8TQ}(2-N`TA8Q3PYc_8gV%wNX>^0|)HB|{c5|xxL70P_?Amn3_fcf^4g6e$l z>D|;XmT^n$Hu5`a)n^ELG;L=V40|Db7IED}g4hI)0e@k`ny2RF#=3aBa@bluBh0)g@qB}=RzI*&1(>K;jfTPQB_o zB4&T)SAQ{6Feen#N-&G=3WyXT)WYT-PdsEQF}a)xAbipn8fk_Uk=1HOE=J_*HQV$0 ztT~twDAa7%xgiS?QZ21%$7vE!=6x1}uHc7L=JN%EL6Y-i6y-ctW0P~e>U3t*os(Bl zaUL2?VwQ(q-Gu@3vNUn@;n!KoK6p^$bzr2}Q-4(wf^+_MT*R%31YU>!hKS!yzh{Ov zpRMIp(n?v*sOo%frUx{|HNSY$sM4LtHxjt}5Re%**MKfBD6Q7jNH$n#WE)7j+`F_g zEws#sp#sXlAFH-HAsuL9BPv!<%%Fv9;JJ9%D?>jOWm+I=hUCp)|A($by`Mx95!Fuq zqJJlZ!e8rLNazfx$dQ>*#*zVLjXp0-+{@oh{a<>|tpg`#Lx~!ov|bXrx=7UL2AGDh*oP3L;#6 zS$X?4B`bVPGe?agD%e0}3i}$M>QVm0)_;KZvMw)ziF}fAt`8lOk7LWK~$J;x!TL9mXz1!5H*uMu-v4 zmF(FzNysx#^Ph`DAHNU{OMy=Flz+#B>*M9?FUY_I*kCkBGdKn?eswXTU3V6@#7LU% z+v;r;09vSn0vXLqNw%Wun1x?WoU%bu7eW8(1iD!l?X%?Kt2<`6KrKR|?hR`eqhcs7do%YUz^$Kih4-%^aJAXU4bgd@a z7#IYP4i!c@Hcubk#a-LlLJ~G&m}Gu@0!nM007;{)6w0N)7;@Hz`Kmsr6K)S4P0y(C zIB_AZKg}mzT14H2!t~jP5>x5L%)gb>;o7@9oxghLW}rb)|DwSRes~*)1bdJVoR4O# zmP-s3`ql>FbIEpY0|ZcpI)5&(M&j(7dk7>zQ0y3hQKwuGe}DQymq>h*a&4DLX_H!l zu(t(k{55@Z42KJH;xl)_n|qUU?DCL|{Z45+NH)eG(P#9{;|+3}3w?J9_tNvCW3~X# z)duTm*YM-aO<>|yhsa6uS4tk~E%&dYmk1RWNb4<;JPdFr-G83}i)-tuBhdKO z09ZWTxepuwfW!q5gBk&$&bg|yy1zBeY}&+BaBq?N%8$ZSsI4vg?)Gl69^!NGzda<< zp$crwS7qH27f!BEw)IF(3kC>$XVOiQv`9aI4}r=72N-}CI&5=mq?PM3nxpb zvBJa01cyiXl+6~){<%umZpZCwl{4m$*BSEV{*XiLvK~RLxH3!R3`FtgD%nX#sRQHq z2jl*4Q^XrRXBL0Q8F!5^}@o2qkA=B&3{d^=qg=ic7#{>tzJfgP*B;ioi~^*-?wbiWA;ST4HBm3~ZM%D?rl?EhSbVFcc-aQZ;V z?3h<{4|4TSzpeRHz?}ecuJ4o?o5YVKkigx597a)8rEQ$18GD)OI?mHFNmJAW$AqC7 zy2$lBE;37ASASIH*o(axd-T>c%~O)$_gj+Z)z)t4re$5V$g0}qwzRe@z5Os4H+newwxt*gujk=XguZE3y3o3PV&1+XbNlLnt$hGqu?yVG?rh2-S9UzzR&w= zDw>9wpt5ZVxQ_OJrN-Cw=QJVG6h&E{5IByHNAnAUC_=O)P18Kd!Y~|LAc)c z^E_FP#_J|}9)rQlwDBW82Kf$0~l!J?P}iEP1f>HwA1qn`sj*ud9P!u_^yr zO{Wjb>m}dM1GY%jYV($L?#USY`OBGLGI~>!$F81kH|Bp!=R9WNV)cNY)7SOat{}5< zlV_vRe6)JV&!f^jF6zz9e3)-v_tqj?ZI=hDo6-3r=a82eds%mM-bTKA{#?&rqK^sx zocRxn*O$V-s(n0ro(Ha1Ot47bu^&DH<^SVXxc33s0@v+;Iueaw? 
zJd#HG$DM!k_4HhZn8%HLxfsQfFZeNkQ&*HOUv0a&zVLlxC)3C2wP>!!cpkp)Z*>aU#(yGlU?nu;i~wGTF*Kyx_NBjw~yuGoSdVt5Bj_k zxEm)q_V_h^sH@~Ddo|iDrVsgN&>gva4(5;UF};86&AMHrbI06%@Su56D*Bi<8!Xlj z4p%h6Z+mf*MI>l|Exyj7*R%;%>=luT;z`fPh@x5%%k?!Y^*^RMv12#)o* zpl^RMJzsZRb|!x|f&`LUk77O&5)Nza{)KA)qm zj?CS|Bna0NxtP44AJ5e;HnxdViO=VR%@qno^|7X7I}vX5(#$aCsgmJ|r^MclcJV|$ z9lFbg%^#X^;raCjfCB8tV}SYp_uv1^zW#qWzFhy?e^vE<|6l+6|M>hk{I}Pa>&ySM zuTR(U^}7FmcQ2Q>NArL5zXh=Di&_fFXu`jEwR?}>4FK=l|1h=vYq~)X7bDyO;P>Dd7I=}G_of&qU>X!XMuSfbq#QUJW6o&F+Z8uIpV6fKu58NXdx z*iit9wg40%SRga~w*BX-)IkM1xn*qlT{`p2wDcjfS<9xk>rO1_ zueosBbmEh2>UZgsb;3afFWkaB@G^DnSS_%;_}G9oNP%#`Hm-m9bR#y!!GU8{FNnB; zB^d=#g;p!@ipnHobz z_5KIObBV@s@d?y~HJ}3EeYQ!Ob!3P=bnJt6Os}bpTIR4kU0Y4Mu}a?hQG9ZXOz)-M zn~ryJXGMJYd4F#B$t|>9rZc}wk8YcuSQD?U{I*9jOIR0|Zoz+#{l4eiwgR!S@@(Tr zBJ^>3vLfE?LIc7rN)A;S1v^#Y#ph&OrCq8VVyxhh zMpPIS$<2SWC1S^s(t>l^*g<~S<}zhCUy9X}&51Eh&g_FkFx}c5^Poz%>om17)RA^k z3eYQS2dv$p%~#tn?$^uP1Nf5-AfS zmEG1&`>S}un|`nKKAc?D@5k|%zn=LxE!hxz^xl8We97Z}AKOg7@2G#L!zm^QIf9(o zc209IkBdYJYhl+5T%H$+AeuLB#}5U!H&aQ?6Zt&g=Fjzc}A9` zuV*uxN&b3Vi6!*4sa;pl16gH^mx3?Ia zS|~U_du_I|rTIpu`P7CB2g&f0pra|5kz@!R0cV%5WC$W!)8D>|4Y7MTCq*{+I7xFW zq{&VRhEe)v%`RwpZ9%9>lvJi#KPyC!~vi0){foFNey zTAnzvzAQk!S|KBF5a=a{Lp!e`Yh9NsWe6JrKf{+uWe6sJUk+!%Tn{8ZzlaT$F$#rS zS%I+59>2wRyF5c2Td=NbMO_QU4m>U^okxUU^AQLJ?+PTqOGkhqUR-7aKKzce!l zPH6iuaqr*t*{SL;x2U%arMsn27LwHx3$gTafsy10lnAXb{Yt`AvCzlM&czE=xkQ=_ zqMaImhK|E-n<6aDBVN!1 zh5X_0!9HoO+b@;%imI{O7%>)YI)4v%u-V%t@kA| zlE5*4+9SS5uQgwGBKPd|yjbe>x3q(GKB1n2J@}P=pIDem)ba`MVuR_m?_wSMXTv?w zfHyHRgWc$w3tuW`hKL5ap%x@hDbz86?x~&UbhHGyK&z+y_&Oi;=Z5q-fd#b*D}8QS zx#;~^b35xpelz`J?fQ6_4cJ55MHzRN=+{nvpP^fR(BqOjR#{ukZsWCFSxT;~V*o*! z&Ykx1U>kbA<|~xaaIq9*C0PdhhX{mQWH~4*&+ZCw+fm$N;EdexQj?}WSUCHlfjn@- zCClLQNxRDh3H||j7K0FUrWmz@TB9I?cG<)U%%@OM2LUIR(DzNo&w?%WItQxv7kKc0 z-@QM)`|e-&{Eg?PH=pSI#%emYxcBwyF-h9_WjeXxhr91Q#*V*AoLE3DRxyrqlrQb= z$wF@Xz!!x+0A?)$&S+x>yvS@M>pf~<0fjg)d^s%$zp#*f!2XpcL_Of#Ra}aMMGN3DAN;vd1=G?{DshZvcOqtk+|~0OP?3^JVI{b3SbXd z0A9p7gCQ?uVSgVQPfb`9&-`+ZxsG#see_yIJYh}TSz#6+nQb8FLB5ScvLT~~$LSSR zN)Z^gErbQi-L8|<%Yhy~LGjThdfHBfj{l4x{Z;hj;dOPbkhRM*?I}RO!+0EjV3*JXHazjmZ06^;{rb!IvOG?-jsL{EK1L3ES@Ya0 zIYD9e;3Jb`#Slozt-=zX%v_RZ8V^&DaTH8~Rg{B`)WLsk_^aJyXu!(z(Kf|NX81=c ztB;4U&i$MsgywG~tPdWJ7j9!Ta*Jdxf(v;+Tw_p_p!|{{DToyS-{h1)S3Q7Ur{LOb zrhX$LwzFFJ=y{7@_N<#vG;gu&mzrq^Eq`mzhyx;10*c3WQ}@?+Zx0LL zkciZ3Y3&y9VKniQ=4tMf6K;|C!#cgXH+%}(UKY};bl!$OcrxIixcK2~vKi|!Vt=ah z+$k+JKhgXC4bS^H*WJ$j&EM(&DKMNNJT?4C`eh|jhejr1`FxK>-C)ct*sdu^FUO(j zf}#j=Sa=~{;q54-hZ=)OZILqyO4|%~k;o`WmfFCrtDLZX@L>5OZ8U$zSK|pt6PFSW zKm-T{-nCN`uT9q=`3>*ha?3m$Fn_nkY58|f^DCFhdV5BZ_DO%_kQ>^u>FwO~xsDZx zbsD_B34B?^g}Db6`zxpxi_)~n>|vkGWQ=9C38nNxpo?XaF&52VXi&32nWM0XQ7xHm z6%RfXm#Z(Sw@57U+1 zL5RvqfJz;=F-}$vHtsG98-M02*Qp}Z)ILF%$9$7LyFk-Z*NEvMJ_cmI_litgraPiHK z3Q_T2`4JNF8iCkYl7GKaVX*PX*XUDn7Yl&`Vd-;k5ky_IdU_dOFCl~iSrEHRP{Hv* z5iFFCMVK80Pq0!hVlBt@Cqbgp>k42SWF>632k)TQmH9~YxHh`^w44RExE2m)o`rN{ z<-$ptEJngQ!8)Tf-#v4btwG(NrzjG2)aN`Zi>dmOIEXc#C4U&F3vFNNYV-k(fkOqE z#_d#q#_gMX9_e@K!7Z|C*t@i|B;3!Jg*J1a<^p?{7{#+=JpPyy)UYKMo`axrVf^?w z{%8^^3_*ZJn|JmYJk%OM`&2liRtlF0<-$X|>*E}y0K$2L4+UJoSjV{)NmHbHe!ln< zuikiw!3TW%0e`o=$3TtSzOh1b!+NB@hi@G4Qmt@?$@=UTCEJTq?ro*{7K)J&kp)7> ze~Xr83exaNOA9Mx2=aM8)F0*tx0s`On@K8qE<`S3s9BihuH;7tE<|&~1k+FJU{O0X zQwwnmiCD)QM8j%7E-wcT?90WmS`6nN#^ag@;XUbmVSl6jUvdsvBegvocz<8KKsR6B zW1g-*XLro=6BllLiS<`r=N4J{_j_+SKzCnTt4YbgHSZfVnWiAZupQ->MkC#0`C!*4sQr8WahSMiSihEFwuiE zU7tiI65K{X_KIO+`a~>fJ>5}hTo+Jt>ubKYB)u$>;OG;Ew}R_@3BgfOBpAjbdOiTf zJ?AIsi&M&(uP9!98fZUzdIkXmu%=5EN{^Rz1e}3zz#rMyBA1_U2p|DAm%wib6@Of< zrh!O&+rNz}*yGH!tf$k*dQEPA~$IZ~<~n#a1$)7`N0EjZwbj6hUNS 
zew=?TMU3Z?V#`kqOFE;uqPWlNsK&vI5qwmz()n|j-txMi99Gw>c;4rBNq@#qxaPUO_CN9J4gix?HBca{k4ahlkDP-iulM{9`HLyJAd#Ur-zZE zbCU58%-8Tl7muH>7l?p`#&)&NCyPkL*a;1s5fef1l=9BT>zt=cIUn{&QZ&v?gmP=2 z6rzw({?hXegZ)MDF2M)QH}KaRPdD}T^@Myf>_b*kU0=HG0dI8eCrel~yoUvStx99} zfrr;QN)w})7#Aqb5#(`3!+%T$Q-9C$d4Nk3d1~=2kIe;+26ljIZq8f9B1_^FNnpoi zz8XytKYOyWVE;6yZCXW9hCwYxk3jWVyR4)jbHykEwu~+6qNV1`;G|x(`2w#p>~=C9UuC?+8Gi1}TV9lsNO%umF@2pr2z{OL zo!7l#cWG4lMBV-y6=ZX#ICG71@*I0TUMAu&Ku|?lE z)Y9BPJfE^W!#;U1!+**kL_m=Q_5gdA!Bh4#ISW1^dVOf<`=SP!@&QKb`HY43W8m zE^b$vTNQM)O+*TcVMk*ypy2A*J5l&UYOR3gFMAyH69@Dj{(q`BSs|V1y&v)s&5zJO zVgd~Z-mXI7%L#G4*PK4IFBUWx2>lEB^}6N0XH+$p@pW?1oC%a{r2l_fs{a3RX7>Ms zC`Th=_Sb9c7`CnT^~U}6iu?7#w@nWFlGC1egl~RsT}K>m;?kxtFH}R!Sietm!NY9On)>z8!xIx zD7bW-iO+?q!?%a{g@mGnj3Eo9?>qRt=1&FGBmdhjE#+anwZsQQw^)xsWC3j>+~>zP zeZKpK)&X;FgdBhpdNN;o7R$1cF;Uu<`wtB-#*AHf+2PMAk zPy2Wrv+EYp`{zZAJ;IJ6Y$4{!a^FChr*|(jm>dwdiK9}lr2p|yYlgPsb zB==||C^VEjULU@f8g&RCe^Y2o{0Xqt{1;xtCkITVd0cw0;rv~Hy7Qe~Y7IP4`ppmh z@F}q|Ke&>M);S}JKs@`~$NGfLw*v*XTL|Y&Y&VXlj3X(6*Ou<=!kSSba9PxdJlZxc zM2Sz6)fT8YF&^8AMY4w=*-l>*m(nTo)CtS_^nvv^0p4-VBIxldf9N7-F=4^n1I=sx zeU1m-{&(J|<7+~DZOed5`}m}BtL)F}PW#%O|1u4l$At^^p0DR`+vNGOQ28=6&WY8U zOQQ4RZy(5P$5ztr;%kV61#ff&M-##R=>#HOI6*b{qgYUR)9-Y1;tJ2y{9UZjTd&`- zw#d)tw$?GY5nEfLf3Nx7W9o^+KlWsL%in(0lzZM?=M{!8`7I6)`JUBe?GVTNz1~GId*Gp+g`h z*9(?(Ri~-vTCV%jam#@Vv!(Q)stP#SV1wwgacxXch4!TEop82u3^XnA`X{@W&1V;X{8l7c7DMK zu|C&Df+rRT2gJFGOE!0#xpe{`5_!R-y^e9{c7e*UFLhoPV~~u|Jf=tmCht-s%xlaP z4PT5Ze|O2%MuL8!KyC+I_}gm<7erWfa5`*((7aR%nzy7PTcdBd!F+fQv~eJSv7I#1u?Fhc!cvf z<~cPNZY6Av$2P*-1(5;IMWSArcdif+xcL&rxINmr8GaJ^wu&khN zL_d`+DB5Xkp!lfsfo&cCop*-Q%`bgSn?4(!h>k1#x(aSMW;-5NB-A)&SVw--vwUM0 zf9QF=tQ@ph{7?Md+h;oSTRwpUB)ci+*j`#YWHJ5FyzUB*RzGYNS(#;e&AuQQU zB>ot1*J2biOUqZpSB3n3J)d5u`6YWL&aTG?s2q<($aQ>*p|2zN_yB@G^B+(qwnX~P zAM|*j&%;g3Za&4w|GWp5$C2OZ_y4g6e}xtBmX>kpi)BP-DPuXyh+@uqCUz?{w=l$O zOH~YHdd(cTBB}4AcC{V=i+I06*KPxsP@egggvRyMnIlV1GTeB$YASybPWP~VIKcZI zUq6Z9;b8@`wm=FW8tVbV)o8Y<0#H()NefGNb*eJN$$+WGM_klpdKxgQGfKghmoRz= z3hmf_+CS*D5$ag`hfezk zCVJgcbKpoMIGY0)+G2NQ!Wit!#3wKuA3P ztS|;<*@#joF9<}stz^oe_ahj_Dn+i=#X+X+4fXccWWg=}Nu-z5FRPk7lG(f}jX(8D z;-*(p!#cPFu3FQaZLGmh8bASQ-8V)H_AV%*(unc?e_;$4b4dgGInKM$R#ZN11cJwG z$j|q48y2S4Lpl2I>w58xf9>-K8XOPjwZxZbHn)jEcG}-KCdT^hA1RPNABoYxZw@i| zw~lrHPVuQ_2s$-hb&5d9@baY4n`BMzFFV>_2du^f%BS(O1kzu}z!M}oRzYb#^NtVt zkk$ARe+m;d0y=$e@gjcq3+4>^5cUa0_O0n3Y=YnQFXgwMpva7$8f=-hfsP*5ZrCE@F*M47$I81l;y%AZJax8jMGVrRw!mj(4~uni`8nBazUutZnm4sTy*CkF+^hJ^F7G8A;R8UhWW&W zw7r;KRwA?E-K1#sRC=39EsiW>-6gYuqJ_pK0UeFstC^?v9qQsw4_#S2bQ_(<;~LGR zmvnsyD1YRLm{}+$NQo$o^Ow~cMq>8Tx-lGyhy|n%Q9_%Ds8>Yvp;Q&drFin9X1wjL z))v#s4Pi?q>+Q|`8b893;;V!YAcnTLheZHH+Z#lEO)H>%-H9g_=1a{L_)+{@D?h#0 zWCje&OFy{hO1frc;u}{rgJh1@lZ!p9HL^d3mSez-i_hldmnA9bP*+vI^f$_| zV1X|U4{8UZ)E)Yd$ao~3Psjwg5|qw&7p>gic>LSG+%&l@Pb~B+PtoVN5svXDz?D+39u-C1cLg9S04ma2YS zsT4sATb9p{ZZO}`h@DVWO;=w}TK7l+J&k?EJ|W8>ltZ6KCUwd0{GKLq)XdHB*|`>{6%KjWnk7T{r6Mr2T&V^eYtbljAF@+?FM%O^2GQ?zg#Vt$f5! 
ztl)n`?*3bM*lrK3kq*Y$yFYDQ4(iiy_|ipcO`DpczOK(KyRQ!l2ULuqsLjk^pQuSA zba>6N6=ZG|jf_ehL1dMsY!B3OgX`LbfF&M{J&=6QQ1-W>+1xK350~&;$EkH}JQ?~x z?tO$=GVqM|x^17Q|2iLk=3%tgN%4sX_C#|P80RrTxgEj%j$@1>>chLq>KE$roMKMoqDW$$pwefL44^UGSMBMi-HCk!_bl-NPV;mCgU z1tz0t^)T--`+1olQ1(g!9zUJNOQ3(@?Rq}-^;n?()Ae+>`1cDB-1-VSez)DpLNa}Q z_0RRakC3Dk1u%b&&avCfU%dbbmX`;e+n}KD%`4mp@S+t5jz<`;o?HQDY*iOyk(*~X zpGOY3oL;&F3t`>z$XC6lq4K6c`hJn@J0CB3N2ByVasF$o`@<$?Zkr8U|L=b~&ONq= zXt!3>ej#Fy#q&FA>ZK(v2p!TVLTWRkk`e3rPGz+soqVvG#KJK7c#$Rx1hcp>h8R*E zVz0a9U8TO6H!WYQ_kFT8%Z4(BC||6C{cORtmPuM_>fPMdp;^?!eFeYC9=jrK|I zk#n1Fna)iDQTdr|>ia>^WIJxizt*I;G4v_rhgZ&HD8a<$k@eSJhYYpwwkqcC?r8M) zd~k(DqaOFl@%Vxr=}_HQ;q|zeZm{}CU&UWq%WP`3xBj_1z8va;hAXK{3hA5*!{1P& z;z#zdCE{@h+nH^!vlD-$E3C}yHCIclYNWrNJ`d|dvYsb6OrQ3XY8;3QNuMYytD=gAWk3iN-T_G|sTk1ZNKcy@bn z<{~o!+>li1IIz?Ak?Z?#BwYJ`^kBZG`aas&{=vHG?7LoHX5C+$;)$2pptoB`JFfsQ5L`Wtj0<^d_ra78^x zGLLMUYk#hLzqPy~L8}9r3kc(r6Hoz)QOhye#>#%%G;?v_c)8d)Ltb>$dC^JdMKsmd z*$_4K7Yyg^*01-S17)-CdJKHm1SY-T8fs?Wbts)X={kSZz;*xXp=#3g!%=*aXiFTL zaR}1-t>e0{$i;Tg zmw-{OWW4YSHljjvfLyF|&9yK}@_FG0bqoQ+wpDwUD>9-C42`R(Yd?O1 zjfvlsuIqokbhiBFCz63DPB*`>=yxxz&UbIM{h>zQrPI26Tok&#B+blK66rb`Z&Tmi zTv8O*l!dT&rF$hG^HKQ_yCB5QS8>HKJD@h6VC5(DymWb!ZPvYi-Hbx%E^S;zRBX0gv53-mj8EardsZhxNju-7_Fr0S&$^U9aC)+zR9 z+j`8{Enkd|h$GxxMPzYb_kW>w?)PVXVNXy)IfC?q0pkL=W#_2R(Lub;u0yV9JI#E- z+seRZ2pG(shqJ%N&rk_;&!5OQPk-kDb$`(C@21)8>xX}8PV_x$yX!ebREbFtC}{^d zqsxCrbW}-eA%#mj9xs&$11Whr10P2);mSWjAXC^WjRR`T;%nB%y=}T1e$9of``B^X zcJLPCp4;ttt??Y%7<~Z>^>thF*LDBkD}CKd6f&C?tnas3NOmH!&&`Y5z5LDpPJ1r= zuh*Hb8)noquG#)N)sH|fj;8Pn)sXuICpa~}tC z^AgBcpMXOW%fYaejT5P=?d7y?^5(G*&9X|ic}V)jnq2( zg$Nl3K{+c}g|wfo#FUE6L8!p1BIO{pE`{+RGy^2lU{S@xYeA;_=rKY(M2cEmT#Bs; z_9|on2pZ6$F45Nj>`(oD;4uObqg-K2WWV~$@0@%({kAWChQe&nGvx47 zP6Mz}6zx9&+o zT6+1gU#v825MVoDj6ukds=n_}0@wVuwFTVQ6@&Y_{pgsq?mK_oSU)~P`$uQ6MZG?| z7XoI)aJ69t=Tr#~6dR$INxMkTR?A9)IK`6VmCRlt$+S)&v@z#Ya2J$oqh`MO^LgF# z+1szN2@S=qKqc50J$LE64b1!8rHLO6I*5LrQY5}{y^fc+{(JrN0b33lpzCVNqd3_|-x&rYe0}wAI@}>RL?pia-!Dg)x#w zw#+YQN7ZAh(k(_}L4<5kku{+B;wVk6t8k!AF_OYMh}?haXF9cF6gBnIZnBq!=5~XW z@=|THM8{!@v4|@Ki60C3j^uphHkSp0*d`?KG0+7DV}XhUKvvq0FxY@CrTdC9-BP&j z`8H*`x4B>6y|5a;TX{BUR{Nf?^f+$kk`?A0gDH36GhP()%4rQ;#Nu-i@<$68jtcg< zC|I=sb{&7_RRFAnH-ZhO;)C2yRyKnls-N3^PIJwNG z=jO241{a67@xmch;c`e_NCo6_O~f_C%D{P-%}9R$drzgBRz6alzz7;zP3`rZaVt}%(F zF>HU3eaCaAvPs_$c+-W=^2%ALx(0ELk44=r<(xu3dch5wtccd}3Ife5MEZ3>I|%11 z+;F1z%hLt;F~uJeEwoM_FA_h}HI+a6@MnI`X1$&^(78^m`v-=8)j?Bq?{T};S^7M- zw0^X{eZPezQasFU;1fNAt?x|=v<9hvj68oaF}F%FpYYQ$+yb1`y?0`+H9u~ndn7ON1D9hV-tTC zYsh0g6V<-7*`~*p-}85}pFW2^4<9sazjT$qW`tyeU(_*l9>0PN+E*u-Xbus{Iem2k z<5h4@0*2uf7_W}NkTaivKY4Y677Ioktu+$cHk$=A&@=61Mv!ogZ075lg68)KhQ!c+ z7YRu$kT#OmB%1{d61qOm^?3n(UY>s>Tyz~Y)P3N_7A>)Pv6Suu?{zW7FOK3~C+vDG zt@}Tl>>nCDSI3KfHb9Rv?eE^_Ha*f#99;#!W?Z~{a7i&Empvo!`F?tpfzmSxsZW~v zldHzGWZ+SZPN##*s9?m-FDS%UOdX*tVT2BN*-5fjJsV+sNs--N=`mtUjNMNa1X&}h z?fNANMA9e(0pEDJNh8R`v8kj?%)~b48zk7fCq09TCUKJ{Opb7P-U~!mPx*RXm*c#~ zpp5lf=(8cOnvF+_8Srt@I#B!4!m^jhjtDXqVJnteh3R4B)@b8qJ~B2es8WWgvuKwP zj|dq8ttyu*j|d%q@vDc86T}OW_gXgRC7fRaxgVLUr%LCrT8lN*o%tK+@hN~@ur7c< zUN;d95QAG^An7@;*{>O2Wj`~p{D)>h+t=w2?Ad94GCtn$)-OFsTxgEF_fL|>-}TNx zKXmN#i;BRmq8OFWuj#S!kRCh{UzcGZb=^AAz1RN6twRuh-KH?#ik1;>yk9LrEh|<> z3h~MfsdOcaUsSoYn{woZmr~DuLSSONO;eCok#-cL@Zq}c`Txl(iervMN^k)Ch@{k0 z7rIeu$qU+{g4LwF5UWY|Zu`30iwj*ARX+Zt(hbrLgNnI%r8xp`rPA9u9!cmFP6R9f zNzF@p*Ou;osL*vZu5}ncHm27`##^|4S6ac|Pf(K0mZRay7f$yTFK&w=su+$wCs zQtG)AQh6<3Bj~TQwYqFJ>~;3%ECH1{x7dC6vBsZ&J;K=(1{|vQ_~b+CK**X!%S*3- z9It~c3PsDqn6s9DFl=3Yy0^J{*x1tako40eO*6N92BJjg3dbVd<7u}5t%+Gwx~nL 
zxE`pA(ewJky8M!&)8V?r&!eTvoaJnh}37MG2?#pFzzIxeZ7Sb{Z~VO{@Zin=zA8*A2Uz7Z=W0LJ`_HQK1|(vX;IQl zmIrZi8skiXd$_AsAT!I-vvhYg8($7UaN2DmKF&tihH!#dy z!;YT2c9{P$ACGE$od7yXmI;IRpZ^5T3^Ik4KEhfT3Io6WGxJ%`+R5%`59N(I+w$y2 zh{qt}c9Ew`EEjl@*RND&2M_7KgLt$IQfttwq89cUisb?W%zw?-c4-OT?1o{mrj%j798{c z6FIG(8w1#bfj_v-1HI?-x0wS><8aN@ZGM-4ln5JtUfkj@KB_VBRrkRlmNKORP!)kH zb(|*qOTrfH=jcf+Jb-@g1G@(C3d!62nBuRwKq}k(^R>4}3)2d6sxi|8jY9F~ zbAiUHUc<()JYX9yuLE9OJWFf1^|t`F^$U&IPak#7@jDE2G5@uXy6=-2VqM=CG4#p| zvkLxy>W^Oumg$)TAn1bS^NAg^N#5s}zx(y6Lm=3)X6~TXEWXW=nriG@==mOc{(QeK zm-Lq%b+^9GHEOAJ+jQnjnE81>OIG7s&D9|##fz=FIpQL?R zd$P#Z_7Czvj3zOMt0pJ}w3rmn48{U6n#3f3h5q{QT7bqp-M!!5`#b2X^C*czO>4LZ z6wMEp5sf~FtG!d?$OY8CB(uRMY55s8m`O%p--}PHnDcY?)x_j}%i}y-$m2}0e;(ku zb}}Eb$Y+1(fBnww+Sqw_(ZK6;yDyrSSX&fL5BJ<)O9rw|jkhXlUw^?{T}`3_^z#0H zvDY718n&ZhMJlxzNY`16SMnrRo7!^w`%$lsN4;9fFisDI9X`*Kn9_NjJjY|#PG5Iv zZq$5tAlzF@O-!z-X3%!GM$Y73!<#R*&Eh9thSre&!)!?*HU@G~?i4 zuM0ve%G0q=21haiUog#=LT4^fGY#o~(c_0`-Ds&)wGY*s@0Awzr5NnSHq!~lC_bIt zZK30Doc$v`jStwbVu*|f@TTzYBfhExc0k<}#9l6lk!^l-R=VklHyo7~Hy|Nz`#MFl zzha#wZE7~ke|67o54x%6mN2~XC}Q6x=gfrzIDi;%mH1*dx)VfhoV!C064{$BeLwWx z6!y_MqsGsvwokM^;?HD;y@|CGg0lA$d|y@fU=l(2fmzf)If83 z8hI!bHcga(ET4++hZkWU98f8rsw7F^_=WxLnRS24OimrLdvf$!JS{+f5>qWeEzhwo zm%C;>ACgy9OmmYFVJfGwIX5@sN-GGkGsnKLD1#4>)@XC{qcPnLwd|Fs$5F{rDe-@L6Pt-VIhqWKXsb81}r~j2x zM)XmZ{M|vk$2oh#Jl-jP4+Yj=R+@XWXmIjk%Y*r$_jj+|^|fSMY58Y4r3=#w;A2G9 z=yW^2ADT40BKOaAbxwXTN9B$FP~GBfI^Or6+9Ja?hq-uh{Am2$XrN(#7O^{2htbzZ zm`n@vfxPRGn1e^*$X;~*NfoC)k)hCv6lQdJZWPSHcpxdL>XQtA51eZ4QJsgN($S>U z??l;7_bk@dIm13HrFs-0UzhRBc5Q9A&b@lvX&CG!Az_e3rE)@TDCi_&8Z|QWD7fcO zs5KMBT^_+Omh?T$o9CEEoUOdA<6&NqoSS{Odq!Oi{#M8Tg8#nNKSGVJyFY5-4d?b2 zzu4BI;Afw{)VBtInn)%r+AQXKTcwQg@NGhCnu_Hh7+4-C-wR58w4=OcSZl}o zi$_#yaeT)6QOVAMT=w6jB(>QQ#XY#Ik6@fBARa4(Gc^foFi0ogU8wtAJS4)_$JDOJ zr!cQ+x0?j!L zFsm@nKQ_KS7k=D1LCqpM`>xKufy;G#qyF|fzS&%_dw`^ZC)61tR(r0u{s2iAkN@C9 zf8rKfs61)L>N_zmNn$5*V;hXi)M%o@47J-_UgQUi zx&Hg}B|PKjZx!z2&%ko*yS+Kuq78rPs-=IOquqYi;3*cZCVKLB&#S}OWwvPNHJQY6 zD?+)JJC!n1%^2Zp51QKJN|f0*sN+f2N#sOsBGlM_WyXX~WEy>1yU1QWW52jDxUBNu z*E@x{c}0zxibNl%F@(kb_WyXlM5Q&bi>#u4}CH+h@Mb z{*AxVt5+tcKv&BcjuUDuPmzo^_3pboH6)3N^!B zKlu-TzDlao6+T(yTTgl&F5_Ob^_45NlD@;RwMa6H~xzo|QT#{M(0(VT&L=`f8|-1eWF zdGP{mf9g{AzwIS7T!LTP+?`fRo7eo(sUHNFHGPt=&75n`1gq(dZh!qh zogMuXY-D+GIEAwZ8R}m6BEq@gC)aC#&ZF%_=b>}VsFV4w-CpCYWhT{%0l z=Yui)n_A88zk7W-aqAJMvASpjYf+EM1N|Ef;}^Ymv~mNY0~s2ZonYY%l8vW-HM!~p z7k)PRtAEAY-|8LXeNj##BhQH6yy{)m*FB7O_10#7@+O{mHttz5GFFso&8J4O0@Q4x z74^@upxQ+J1K2#&42<;QnLAP+34rr7(vtL_#&posO@Li{l9#yTSADnjKWgXiODc%; zg*qs=Rz9brooMuRacOr!F9{)^svQ# zkmw(W_S`3zB+iWVJjB2&Yc9xWMpUkU2-5j#8HZ8g z(dLNW2YP|q9vz{x2lPw^dS+ZqM*!wb3~+&1w?*1noVMV z?U(B^t&@6jPS#Fs%}}2TtlDG5IwPk*l)eWcuRN^+4%Tr>{1tpy1eE@=&#)v)5n>V# zaekmxVyh%4z~4DBhLW}GYcpfY+=WIFQuCNVP)15WPtUH)!Reh?M(A0lLT8OO3WKq4 z8yQXutr+?S6^GA%S(#@{SAua~F|^`paEz!)E*-n5az~Hx`JM$yd>ZmOuk&hx@%4AV z*Y9<42DE%9^Dv+_;-j&@Bt)1!B$_*@#RCTQx}}^|1iX`fBDIH(UBR%bI#_%JP9 zA&8Dec-+!EJWnSmp6}Tdmf?=jv_6DEqy$qHwTn*y3iD5_QJIL;uye^##|+Omz9Ntd zDDo96eCZ{D=dV&>TjOd{e(KVE*2`kt#39ja`~{yGIV+-&-wiZr`SQPOqd$Icf9^$P zZr${@@2+}(2$y&~REwLs%#Cjh#wECxJI$~yZ~s2nz8fNbRPQ31C=T~>{c63s`qBuk1sCqH$jqPFCk2{KPMHLahiW$FQ0uM^hi)+*`QapZxWx`)o^l7 zzU>IMw)ngY%6ry7@_Pr4PNIB5V_Dcqj;GRlzYlB*o&(Y}#ogfC#ayJBpUrdWut=@W z2W>S#TUukxKACEBrq2ThIz{=%Qui|Vq^kg zf5w=~g=;jYsncz0b}b;kX(y6RQ`=pA15<}UN- zvIY^$m$n{X4eqa9+qdT${qyX*)yK?R{J_;0+6B(5#P`(!_Y)R_#o(Z)QXOY19P|gi%wb)Z#^8UM2}Ef;cSm2?F2%9{V#RYpge-=K^oB_U#$5f6aS@<#(Ni z(Utq$&67RUyw$F)c{_j9Q6iQvajX>QI~c`?u51$S9+HrwMu*4Bbve@q!ibzx;1gx@ zJ!Hmbb<*Gf*L+MV_#C~a5ImeXGDbYRP-CA(fy-tOW&a)ZX2c7mBR`6;mxj;V!RP&i 
z&l~Tf!LLNgZO`QN>poy47_Kok?vrgSP`J^O2yL%nIf`YR&j}o<3zv$e2tNY06PL@S z2s3}0kw+onJQWYN@)gxN7mI=7T8a4boB^M&vAL0o%pQvVyKJ5{gK|A;yLNSXapJ=+rtUN_J3dd1zPrh#9*DZ>?c6(Ye9tC7da*xz zG0n$N`|Wctm3mhqc`y887J^NSEFzEzK1_d8-g2ZGy>!^618x#Wgzb8<>;2Pw{!i;l z;{0pI#WlzcvZ^SBnBiGayU|OL*c7Bqke`5FjbZwbAm&t!;>60J?#6@f-Pr&_)|pR{M1HoYWRO$Z=~ioan~icywO^`=vZEAadjTHISmzW z$9qQ}ZO{{{G`AOTAy=r)dYrmfVpMjT`SEmRbf4d8|4OW)xzZSF*9nfv0wiL{iE1wu6HST@e&!m!NJmWzHFC@EAXFe_h)XzQ*&N9#)I@ zxy0$sx`AgK>zNRj|+h zc?{>FVvtK_M$?6pWzWUCT_skLhU#=8?W>iVkL8E5m{)cguf1bZnbYpI7V(I^M2CKY8ylV3tEbfXBrm)D# zzVM5n&Nwx)e**Kb_V`}*{Sc!c22mKRY4ToV^Gfl+Np$ZHcMeZzVCPaSNB8cY$6KDiB6LB zKu_7O7z#XVZ_Ykgj>x{O?t-!^VN>y#jWS+VkMPfDf6(uo{%eN^#n|h|TNG=amm0&~8cx8@|JV3xX_BwF2A}9b z6r6ZDi8Hb){UhlAX-sRER9e*XW2*h!Ha?x04wn-cJ_BhZzZQUZesisc-ApH!p82f@ zyF--Pe~WO@6Pr;C+9FNfXEL_64ozGv6y`R0>9O$yR!3qh6N#pmt>UJ(gEctgPIiO~ z(3zy=wOLy}{%p@$-dH$&^Ad*&5%5 zCO+jJ80Qnh%I8JyfYNsn%U2XRTE!%Zv}BF0e}u|8R|#$B35OxH>s`V%-oDgL8!m5$ zMR)<6L(RU%cW%Y!usV=;bLe{%zmH*DH{VBA29FX&nDTmJHcG|!F=D`Fz^kFH(Z?{0 zs>-4SB*PdF{UD&xBOuW4jrmORh7XPTvy3`#zCul*tEbEM{OraKWBqIp2=q2Mk_Xh` zf5jY8O=BeVF`cAD<&2!IK(m%cOpvaA9VM^iUyZH6Jmno$$I0wuN@9E;YAKl3 zGID*^)?O^yFXEmEGj?D67xN(Z|HX4Jf0H)XR`LaLUV=;$`*A)U8l`~POGZGUR`m$}yDs^klFD0ISUF@B@i zqi?+JpL@H_!IRB>TSzIR(HQaOR!?fA649_KUjg5Y#hP!@jLaHm9%ffZk*rt$6 zs#I2{7~jH6|2(D##HTkNMoH<`eBbJpU`-F}rd>Uz;7b1Bnl3o8TW{m;ENx<5{2OKK z$KuSmIZoUYXHbtS+Gw}rs`0;6e?E7&qXX5bEg+dwC&8N~Z3dQz#;`%J7NV`mgqd$p zQkpQQFUB0FShu5<-!ERVM=;?A6FsiM_*$YKHjF41%9*KT_K{54vCf%M+y&H>kco_* z8!jFtei!%lBf>^hws^VQ_0jdKyY+EX&+l@cwl{vej`4S6{5SgCH{91ff377(auO-w z(z4p1KPxdRY{odATdO)jFK6Z)>NmesE(J>noe5g<4?24o_CNB4FybBN$Eg=1)*pGV z)nK9?d%N$4IljkXOz3GmhYXknRT#_<)^S9U^t0p`)HC4z}=K-k0)^0SS zIAO(MgYyWVhx0w|!8>5Te{qI;>`b?OC?3N3I_BM2cW2I&Q27O?{HGUnt=HtIpL#*l z-wN^n-QGuz=&!A@zlZTeBAa?kWKI)pf~MwK_u`~TCZO;v&L*TellerXlh|o`H(9{w z;m%i_Qj!N0_XK^|OXV}CR1iv@Gj z^ocG$ppLxVbMZ|2(<88nT`uzp^=SCwI_FB+)#*keh*eV$!+flnU#hE_pZHy1#1T#s z{a#c6M=~_WIS&&_f575WSCZ}CzndDf@=4}qZ^Tq3zDGjzq}}$U(kZVGqPyOiESI{$ zxSC1TKAAnp&;F-Xy1L?S zi8$2kWVgPdFa7U?9>D7ZuP)F1>=#|H^e3Nr{awGvPd7NAAD_KTeA)T|gxws!!KO6x z|JCi{Esp17r=i4^$O!lx?*r045WpTp=83snu?EvIs%FGTe^#47Of!{7(EF>NEV|0uwVObIeRoER4Fg8 zj+r2~ltEPvseT_u@cUXQ!4;w0fYySC8AlD3qrN-4bj(Sq3)&w%(E2zex;hGTqy_RS z!Re2Y!TFIP+pKE$&8yedLdMs+phNUkTZ4h=^uy|We+VH2=>Nsa$=EA|_#>Ak! 
z-cThHu}#FCMv(Wyit>HLEBXpn2u3Yad%cN^DivVd{sIIxCIR&kz9orvujQff?S*l_ zH>Q>HY{bJv&AgB}Rt?Vd;?49+OZrd$ryu<4FPti%=F9c`!=8xje9Q5E#a9rU1t`>H z2^_`7e{T#D$zXhP>Rc1zRp96Op6AXWh&y~Vf$a!!gxhu;M?5(3e}Vkb+(uqbNOWG@XoesbC~Wowj5n71 zbb$Q#m+c;FbbK@V7PSsfD)ql(> z2d{^`k4QDRmmj_#V$eyabJg{WFvr9AglTob;|zcB+*b{r7Td1#iw1Sa&CQ0~_!o}u ze=yjpuJ^14xW-{Q@&z0y=5yqny-lrSJuUSH4EDpZN02iL2Zz>bLbY`G>9`T$c$Fov z_q1xfrM)`;m@vcwDgkBwl}gCnNDZhy@Rke0iF+E+z8(jp7(2`)9JKm=bzbG$$zPy; zE;SYxka>h~pEOD;s8(7bgKze^$Uk(i1G(2Zy&!#@6T3aOgiD7LyY^r-gUm z)GRQs1?8T$s7}K+{Rfz$zX9UL&@(o6w|OBjk8HF7^KDbVEAyeF{k>nv;Z(^9oR1UO z)$4DCntOOI&z}~qKlAM+1Lc%pCTnUkn?96>{`(PgQchG((v`OCL3#fIK6%xxe=kS2 z>Al0-2>s462yJj`{UZjit)R(<0#X4 zw;u^;v!1>3j5b^o-w`4Dv8h>MfAhDNr^x#6dh!C*@wOiIr*EEpU2EqHM!wxwZeBnA z_<}V$Dd>r8k$HV5h9XV%2>X=jBJXf>~dk&BBi!;KyhIXoyK$V>I6F87T( zM);k(Jkn&OZ@e#PQij%pe}pvqL8lXT&H!@XMUNU#_S=a_muvi45)Ix;U@>TOzv~y~ zix+ar)O6SKF8Y0oB^ycgvEP6|A(6%PO~3gG;1)gY5Morf6uo&Tu6EC@ifm{ zw%VYVK2IAfAx^D%7LdvmBI}W<$`PS8hj_ekhT0Z|Nr|o^FRoH3Mz4cGTZc$W=wCFb%;IsWVYV3nm} zp251Juz#x-dziuY__$i~JKld>y^7`hRb*{Fj~LR|&NH+Vp=^ug*evph!_JF&c~*ocC8;Ix1O3;{;XdlX;*8(eec$5 z#k%w<8_n~Jf1c8Prd3}hPc^b^gI40Y- z$lDVCxt2Dm#X1D#EY1Sx)@9SPbVZR*7F7}k#gUlNdTZ;0JykyCVzeM?DN;3JaLV_f zL*EHUGA<#?s%oW*0iCNOd5LnF#K-;|#uN(jB1~e3e^G)wJ<{)MJ_Cm%FL$O%-0Xk1 zo{zurRvrE{zs1XRbEo0q{XEP1%7@2ivipTsy?6>Xf9rYep5FH>0#15oZJnxsDC7im z59W8v$SV(u4A!5rzMsshDuihC4nr~x3YbjFb*NNM0yS_#JyyPDDY~&6`F}H?`SAxpZ z_6#0TV*G^+8-FbN2K}dF0hsUN`9!CXQKBkre~oZRR<7C%k>eB4LIuR+0s6fVl}M^5 z9vJL#V^0S4q=ed6AHAXBE;X|4Pf4dwH0{CHsIia`Cc~PWYICKR9xV?pXq^c4QK0_u zopGKCNtU>Ns?Au!s?j<1fs6piPi?N#Nj%SVa;ep{d-d3w2;xsbaVZMvz2REL+9Y61 ze4LsR4vsu#Upysm<%QlPcMzBiN>Wm>wRbGM%xrx9IO zPH%N|Uz5Mp(Ru4@UFNS!@zB;4q33xJYBixnjd4KCQ!~l`x{9qX!i$CHR-|b*x+JBDG>`dXtAs}{e}k9 zrq+Pk+NWkO`hvD++{CbCC=X37G<+L|JdZXWWS|tGrFK!z*r zn3iO5vhuXi+IQ((`Y5CB@ zu*(wQl-(JJS&uFuN&7vk)bVLDei|CaRhSWF#q6NdShjDmOB$-Y1}$TaRBRf4OF~l&o;Q=pf;g zZ=CY*Fi|{;h<{#qeI4j*kQCqrTbPLm4wLxs2B9nYImo|zgeeLmI437oe2R_vgQ$I=^q)V|YDu~S_IS(PaojR$f&esaq3r`lm%q4HK~cJ@oz zpM|3@uX0mzXFc=5Nl+zo!ICpCKC39yOQt^8TVj$R_9Oc)b+7CCVFZ2cIzW%jdiss+ z$**T?cLrTzm7DrU<8|?>f5;O8>$al=u8S_xF?QptjM&d5+YQ!q-wl^EqB@6 zopMOd^R3ZNF@B2-;_?kAWSP4sQ>k;Ec_#6LC=W%GCp5f`CpcT=Nkb0=|9PIRCLU__ zH2prxE`EO<&LO<@OddH+4WL$2!qm_|A6T$GQ2*5SSY?{IOy$#Mf0CoF#eD?H2O*=0 zZLB5M`xr|V!x|N6e-@2*6Kfff(}7EK*s#K+wy_2YVnTHy9fCahd^3qg!zt$)=J{NF zTc%eWBb(flIdI!UExd?>XJP8!OuN`jief2QW{({xqeWC{m zAxyb8t^av1VKe3Se{h}!3i}ywiZDO8ZoOzNK6L)Xve-m)a!`MNLd4_At+}7xv_nd=BRF?~E+TFn$iQ{(gVu`~24V_J8msKOU2JaLN{|fm+$|`ZqRbyIju_e@o23_D;uv713rO>!W}uf3eMI-Tzj`36BG1 zGB1b@Qx^Ss{bAT%pKUlCBU`n7Ho$!I`V)GQ4_WG@~ zY^v3>M|Xq#q&*t)DJ@>8F7#FwCDlbvMd)W#9beM0l9cRHM;~3LOw`mnw5HEQ2@-}! 
z{yp?~tzBB*?;Fget`=!A@@S}I{)&xil{^#K>TKZtuDq}jGlRB%@oT*_xQ*MIh;ksl z@u#`jFq_ihw2U%fy=SrhnGfGDd(koT}>s%jw_pDRIB(-_A91w?c*T(snCnnXU zPjij)L*+EsUX$RQB;DD2o@)PYs^wX>I0xp0JfO3O$NtBG(NOxZ_Yw2tb5&7ww;ONf zxt59t5RV1v4_3lh*sD*RD0%m{>d{fc5k)p+Wb(WR-(>`~D)u_Xa(X9?Tr=asmnpvpGk=E# z52#u#UT2R1(Upr@b=YfTp$ysV@zno15F7bvO6<9^T=G^f`bW#As&e5~axlv0HTV19 zUhhHk6@&*76*_`Jkh~+04lI{wf%`zzegs7`2VjxJT-Pwy&m)`SNPw0ONgt#&$dxiJxeS?^O<%{bG| z!yOnuWXedg{K{Y7GMk3J&$VLxkIbc)Kfnkbf4Wh2%j@Yudp6L%vSLy!7xBWxY&%@J zkVT9f1&s#V{ujSng@|FPq!@iqj^*)%n1n`%8!hTX7@G~&s-g!gr}`MnMBlARuIhSIvS8zPpQofBYXVxUDW8SZl7VXQJ|#$)h*f5*54S zfArM<&NC7oeXg^4Y7NG`hKDpBEQFTYCbF281Bk;T1!XBmb7pVfA7tozKldObC%PA_ zHh<9lha+>8>jAYe-o^dv0k+xcpL!%0T%=#O`-F@-Z1O7|7mBCyWJdPGi1T7~`UIWw zYWX3wW%I*sNuQ%rqi6iJoI8UFmnC~Fe?8+h{SB;>qrfMqD68bF7mo8}mPdHY|(nlREf}-`j?d z@ONSUD!2D#R4e~J`h8zqaL4f;r~{jHn(2E1>BVy{iV zO59??yIy#=PY?Z@{QRar)#@+&^njY*UBmo*)K4G#;^=|8pC>~<`$MVxl!+1GIidZih`Rj<@Q75urr8K>u_h>CMW*t8o#SA=b=>YBi5Kn=sbPZfVfL3NHRg+F;!Q~{yK|@&Q$kV zsl6FKV3`y1zYX|f*lQ35Ua-F485msq^{xNKRo~*L&XVnTf9pNyK$PvVkK3G^y7QDN zauS?)=$}IPUP0=cukaoF4x#;76|i3J<_B9zl7aiQLtU0dGD8fB1Rku|-ADg(Oz2zi z1aqJC(Y;G~5AS{zKsbG?;P~pQuh~Zkbdt^@u{{t8U-1Sq;wXvN+f`+K_cMz~wSKJB zh}e458=fvce;`j|^bdFeSRkGoePUW)s6RAdZ}Z-%B3D)>}duCoPhh~j2C-@UC=p2?H%z0dcdT|FIM1WbDG zTNnL4-HX zy-#}|e-Z-UQ3AHBhH67Jp}<}cefc0TufrIa^VPr?sZHuZ!?2kO^4 z-2xr|-{W3(J}P*=;aKX9=U8@rXBK@rkmOA7f9*H)Wa6v4h?BuCQm3#qDm*pRTp&M zf8gE_=e%q1pXa2&I+cu*M)L>uBj42r(l7GNR?Y#`;gykUDek8+EwxwO`*T~h{$haj zF~ma}0JlB++0|RCsW-^GsrJt|zFK6ctmuMCVMhO1C&u`E_ZC)5VR#KOM_z}=8%}!{ z=e5s^Aa3SUL)`Uwu6|vz)$K(dbI6VTf8lD=;$g5hqqU$4H5(^)_U2A^f72C82VgSd zD}K8AE8b_=cYFqb-@b}9|EUX|c1W% zeEwI})~p)*rq}P~)JHvgqA)JObFE#UUEJOAP#e6IICZ{z=WGa}wwYPngHas%fB5rJ z^px)q<2?|^t-N@>EqK5RkP|QodotRmW_w20igpw_SViosEL!Rb;j@D8rdf99mjzP!Ph6H`rKCCP;NF(^wCn9OYMe>ECeV38Q6 zTdFIJo4?^v32Khw6a9Mi!6W)87K*duVsEDd=oYQJYBll6YnSb?r&k1uusGVQ6GZqx&PN%Esy`Z z*U@j`&)V(Z=;{4cH}9PPe?>PBa{sTdJy(Bytk_%Qw#&BAT3|F2#{o*<)rlcPVx7m%#Qe{nQtTz*E}E9O$y z8pCPU!yJIZh=Z{rQR^4B$GXk(^LYwOVWtlZ{WwjYa^0Q>d^2_+Zv5*)jNO~@GJg06 zo}0|Nv*+113jt|=Q%F8SY+PQElm5YIH~EF{^(i=?h+APe4qLh4aaVgf`LAy6_^v)L zo4vi}0=`DezCG@0e?mzJkEl-xVIRHd@xB|VFLXJIf5Eth+;Gp;M)1ve!Sx~o<$c+k z+T%Lw{3M+T*T>NjP385V;h@ih)Y2|#^G%8yqGcisf*FxRte$C znz3PAxt7b^p=|>5bmTDhIoIzS(CcTSKOLhvRb$2Pt2NQte{_)?I~+Yh{RC3N1xMXt zQbfu19~vobZRoUQQ2Q%(FY&B3l^)F)@fj)JNBz%1r1D+g)*`2n1w6EiL&#&~5zzW; z#G)5&BM$@PdNsX=V+PzbEXQG2bB_2G2Xm<-xBM3uaWRN@ir3FV`{b05IVG~;PVF_P z;+yXmbv8$hf8Mu`$r%CWjQe;@eSzw=N0_Osi4qN}@;I4foI zzQlf36bz2^^rXqbs0g&v%d~2EDbT-vCCZ;d4P^7O>Af1c`l{B>aaWPC;S9+FM8{8S z953)`f%-${7)}l6lg(qEJHQ|;gYG%4ex4J~?WCQr8uqvND!<5CKz&YX#eMYZ?#@Jo zJU^9@e{WMU5@vFyuIJ{|hNjlZ+uC=AJbYqr9!KZg5Xh|#mpT3<%+T7vsyP>sw|wyu z|1-aBahHfQe7)vFcA<@e3QaqcC48?J$-=%Pe?9#7<`tel_wQc0sKu7259R3XG0aSd zb9(Ay{ChU=i2I#e=7Yv3@y1*1s%stj9Y!dB{l(|HYNE)HC2aAV2$?A(eahnqy8K! 
z5kTUGXJ5iwQbR#Fjr_TLf=3vUDf32H>~*Y{}@SE-11?sjfb zsub!VN@A1?Z{KqPyAx3uFQCT6f8=Qk2SKvCF)@7VzYWGj%^h4f64%WD*9~c~ska!a zZVq8RpjjXO_P2)r|FOnWa?zH%!MQ==MxZ}UlDdW(5!%T8t*brMz6fo9KdJnM+*k5% z)|yc9aVxg>^XQx_!Sp*Ul7|e&emEzaP$%<@9mvE;W`m~BsS81zheU-Qe}yMHtChnw z>iFr1)T*!aSWY5ENk|T1mF}5V`wmz34T~mjT4G-S_raTIff}zkFVv0qYK(m-XjZS%?W~9-q=Ioxx_BHg&h{mI(;beKI z=>X)6>bTbjKAal-8*xu>9su=F(ck1t2k#W5Pr%;6N^$re^>#(}80;@sb3f+I`7`T} zH?SDdfhKR@Bu$*)X_wN_2r++R^D{oT+~IN4;~FN2BS9`SdaM{r5{=I%p$sg}3$Uq4 zD21wI74}FaMs+qA=}qQ=)6brwuQ(?t>!1`IOy6hg zTA@#D+}&fv!G3}B@qg0>`?Sb>&o8=;$qUIj(-s%QBH@i!Tz{hzwMT8l|KZtPbZ;C6 zs7rD9d)=EGoT5}o(e9BQ2BvN_fUdYjeE+X{K;53%aaZA4OCDx|z+p2{2h={9KmjDx zEibblJO4oshbu_0n8CXL@!b0~`a`ONAVnsa(rzq|_tSdR=%$GImlY_v!3hW|3xm_R z!Khqn>KRe7cp{4(fepR6O z)GqLub=Yck{0?gApidzw$hr)%-hn2^*>)AnK8MVu1%Ds=O-_R6cR!*{j6!B$kN$Rk z{ZD#2|7M;=e1|_bF;AmE)BfHc^Y^csOm9Ak)ZxuBN963Ho>)Mk8hqNvwAEXB@K>>{ zsL}xmGn=SGF)`cTXCI&8znPDG3ZDsCnbGG|q1K0_cAUJi?}vFamVY<*i!p+X2`Tu{ zYcM?j#eX=cs-6*!y4R}Y)tX_9V6;m}=`J=8`QK>${B>Pib@$qHqs2Y7`iXv>pZs=i z{|{QmyhKgH77w#V43=vAKNZg*9RL5EDNh=G*Hq&1I+L(E%*7k-wf}#*iWeWw4^6~H z*QBj)Tyd*g#4)rQ7*}uaKi}{AtzDeiX!cpO#eWiEEw+g}_cTo2^7{VWCfuP~eS3z# zd8aYbxnk8FeL7nGMjN9TMC6vxVQPygzp-(>&T zw`$ED+3hEb#}knAcOKvn;Ll8R`$7PzbN9xxw0JCE;Ikc6=N0^O-h}>h&jyaZ-+!U~ z-`k(>;Qtsy4J0_=I*-!Fp%|l8)_F^puw3WZOm-Gh`DCyRu2Eoh zImOdwu#=pxqtNmzSIFjVIkw!cU!Mi2c!9RMf=hVsY`+TW#aFfC+RDoJp7Vt2&aakx zgJ#BKwP)LoC^)14fWam_>cA}`LYS?aZfCe~I)|_*1X0DqBdjlCC`Xs#X@8;2hbMa~ zMQTjWa2X_aOxI#xBs_>xuJRd>?$u}h9#SzElT zg+7Hl=Ng-S$#JuCQ2Z&;@8N8;3rSX~Z7?;pRe5Gg7G1fOVVY-#=Nih4WoD^*Xtv|= z$c>e`*QdCeGxI#2*?m>6%K1$0czO-P3$ru*b<#1ByWFqGGj{QyZ+~e%ICwl|-6ptr z^4;oumg4o?B37>8&E5tE@9+*`TbbbTK4nL?u++EKE^N`MquL%63GkU^Gls-?x$Llo zY}LiXQG9vvs*~faN?f?afY$}8AUT6*5i!8g2(AE%1{UBk!1wU60CZLb zIJt}g?#x~nFoEg}Jb=nIDle!k|EIF`DT8nMQ~yJcd*m?x(tkG&Km@F`K=@N`0Go4q z4mz--`o;#>l)V7Zfyn~OV#+Ape1Xb|1w>160LZz*0v#9-+~xtBe&Hk%tL~t3i1M5e zG*A`=L-l2Jj+yd+vKS43HUG3g2uKge7AouP8_CB#gZ#t)8D19XIqL5n!4-?g0Gq=bc3?n*@IPhlorC5JmodNsR6j#-C6{6V zak%E7Mfwmd1p}+N}FMnTK&_((|{liX3zwm(I`66RL zK>FoIR8Bwbg#ZBa1*X=EP|bQZXRkNu+dXXL-eqG3Sfk$yk^ z6n^E0;27ffisaANUw{AjBl3Tm`)%LS9Kii$KYz8QedvGe<&E&aWz+hj5B}pnA9?)o z-+x{H;pex%;Fmw~@9@8s-{(K$0V=boZ0D%`@ZYqD^uvGs^%u@_d;aYYyL`qQB##go zjck3&AN@mT1Ud3wU4|Hn3wfNug7PIn{%`uZ9suv5`USvfU?Y9AU-;kK`@@e6L?j>h zUVi|C{P&8+Q|Sy5e{jKp1=1%Ff$Sptz!z{`G81i|xH-V!Rs+j2KjYU&9wG{F|73;Y zChUX*p_kn^RbtF2bNM%0)PMSe0@}yF=jzmc<|+OI$7piPj2jl)a;V}Ec;A%}LoWwdwa$b(!hrNt`TXZ6G?hTMQMbd*MJ z$MU|ZoZP2nRjQHv^2!&hiSn42r%hYsWvt{a*p;Mi-p-9v?&mh#qPKFWyF>XntJV26 znjpup^LG*#dC&v40%{Hhw0W-OV1LcI8!*m*$vFzeY;l^aDSa*)Z2j?j~F3f_HdVw(~+_c|wjr@hrusoGit{O1hkZ z60kBW=UxdnS(6L7MEB3iB`D=hZsam8udD*(S}$!!k>wgMJyt2?)+*ytIe*H%To$Ej z%1^%ROSP9rtK3*^mfw8EvU;f`dNVu0MM?GMv_-kn;@y#b5DK=uuG{NPvmAzDScM%l zYw&DTUNnV)nOiNv!9UwtVQhy#z7)#ObM>!o7ioQMa>GGmh?g5Q6OTDH0>2hkQS-a21 zk)np=K4r(9dQtbmIKHS?cYj`vE%mzHo9yUNPwrk_jt13t_u|4(RCl|l8B>T%zC3#y0Y+&>Fh1F9z!IeIUD>8T+c^O1`Xjm2m|5(-Te*z*F) zVBvV98Ub_y4FKna>gL4=R!+p{MQT3v?+z)_Jb)E?AD(gnh)NU$b`c~1szejXCjdO5 zXb|6#1_C6@z<(hZ=q$PIunT1UB4XJE;)WGyK$3BFVOfI}H~vE-WXT1V%U7mAA7s&4 zA*ryk24sW_Yd;iEJmfq>2tWUu3uvJbo`9g1d_IDLljS}y2LRO*zd$EgP|~76gqBDO zPT=QN;0TedLEsVP0-Q0C69@w5S9jA0(s2Y0r=C9F$bYzym!}cr;|S`?Ds>bEU0JQT zfL3WoAXFm|@)4vjEdh$gf=@rW+Nfq=fv{vhbVnZMK@cw>FmhY__LD8QjkRcWr8ldV z;wM)8$G$X8`)6O^*d>>FFCPIvAURtnz$*dB?Xa&x2iCF%@~_T7GI9{fHy2M31b&V5 zY6YSnjekIIWK!V&@W46$p61Bfhz|JB`IS+-fX%By&*o9zwFHN1G1SsAHlh@ns(uNL;ctx%Pu_owI38v zH9}ZM1AyVi(h$I^EKz1Y0o~N9h6vX&Sa~BCzJFXbUno~7+I-+R3m16Roh283TQ$!w zG>`a!*<*p=Jo$wS?6v}^aK2xnQ)}FavQrJfR*7~2yt#nVXGiPIDxSLXKyIu7K;dH# 
zdgBS@3M|Nl8sHUARm+l9Wl~&twlCxpfT8nO8QCw=>G-9=D#7=CkbtX&-%1}pQy!j(uKc!j>ySbrrt z6(JOp{kgcv2E4+SExQUJA4Yi`#7#l83QgmI%#PE@%HwG9IqILhZ7V1N>R+vCJoklr z<7inQxKeH_bUyG-(|GXG=5m=&b6EqXMViPbOIbs{ zTrQRfqmQmgP=#)ueDuS~f3rc}gtgC(fDcBFe~l?;0wZktsiL2`*;2P!a?xo$Ck~}m8tPKu)UY;kc&|+`IUvzzklyPy8s@^ zi(HItnT36sMlK&^ZprQK-_H{xfE?15Me4Mc0Om7>!myE9I=4>%Am0V0uax~$lJOLc z*CAR_{Kd`We~H)%hEa22?b5%PbUS z{QluBbn|KC=n>iK2-C_M0H-8?RlKjRMgYBlrSmJRBbl*(hshcclmyLLe{jQQ1YYAL zpLvO4(T4)jsO0U(wts^Ex}R&Mp3B^(nymgAXRq5o@@;Kk{?UCNY9J5A_+QHKFB^ma zjr&?1BRdyIm~nvs60d1OnOh@m;1^(9gO)(}&vkzv;iz=jJJPu z|MkD{NAFoNf~B&`A3I5aW^H4I#uTm8>A%_!$IOaSKK&bCLH@C^YT2T7 zxXp#4RgJ*s!r-%pR&pJ z7C5Tgan%9uA%A%}Xbs1WgY0~4ae-*g-pel>@xC1w{ZqF_{^HC0{kv{O>ulTe`0cnD zWj~F4|KEV4bqs(4G#6t-7yiSAE9mzOO>J{N>Lrw}nOP2OdZEc-1r+ zZMULl>+|e=o^4H&;m>>L^Zb49e(_WE?0xY+&;A$x^M4$C@juTuO+%q`kV9<;?6!Y? zV{GFz3iXIi(*s^dc(JCT;~PJC8^Qzdi?@s;`mXJL@Ujmc5I%Ufa@$eN#ux8AjyV7} zTEmF|J;U!aKaM&7^Ik^JKkxMqzJqijS|9xHv-iROK8GLt?{kXy0D%AaasTO;1$o3r z>>xe>uzx>zJ?anUt@>#vF(d`f&kUI zf9x6`MxICb%ol%NtP`nFm_ObM!FsXe|k`5AAYZ zETktHeDP0^MYuUy)4P9gS@uKw7gtAo{NffLp3i@iBP)DpZ$EO8(f9IQSRYF{Hl#fOIQ&z0d2l=?cRoP+=EzcLUm|SlBB%6lwro;eypHlFu>f5r(Ev#Yu?0I64=DraqI$6f8&nP#*r2Tf8Qr`7DT5i2{(sT#isb#b zc4^eEgYZQjBss`YTA*-&1qzmrEPprwsYdVu2%mBYgfnn01g}AY_U*(oNKi6$J_N}E zXVk8pBYtGWM}z3=Ihavh5lB4*~_}Z@>fCG}J27rp{#TLX-JsCjfM?N3@Wk`=A;unfvJwhDuvzkC8PBRQry>`~Y6S^DwwGtd@< zuOqxCvdav>9og;IAC|w{#~biy|NnD7w)`?Z~B*iD7gN!4~?XT#&O{@d#D{AUB4j^bVkz=gm-_mOcmy0`0u{N-wa1K?1+14_Gw(yl`Rv2LUD zlY9-F9BPOu*Fq@^QJfXDfgl5KK}>pRe7hlL0}IAL7!lJM5PxX2pN{sCA;?()$TVV( z1Bwo)oJGMSptjJ#SEMYq07iaoK$pR5;Ui%8P+U-)HV&nn ze|#c5ExrVf4u2KEH^2#@vhY1{YN+P?fCeAU;%DHTp^oAJTp8*Mj|10(M$S`c5HKw^ z1a1$_g+HTw0@s{-1NRJ3fF1B;Xe|N{R1ew~>i{oAcZSKptD%z%8VVZfP&|=GLw6A@ zpr+8X1PAy!XaMU2--5n{^7N%J073-(9tMk$0w0IBTz`0=!6vkX66K4FfY5*-M0bVt zAgE!S3j+uim{`ILf-_8ku!5Tm(?!6+&4XDkP~eusyCodKZG;aXT)>Tnj|Huec$nuR zhn%snL=l83EP<$iu!rR$>LA2nm5Wyp0VxZZr_MpBgN#VpdeJMLf|NhUF4(NS{(&o; zW&OZwgMWoyRwMyY{NU}TYYDzsG?~bYt|&}lYZss?o}htG=o`<7Bnn9QOG-9lLeiWk z7EeIHJ^n)z=b0}pdC@uf+hzq8efhvQodft^aPJpxhrvq1kIe??B>rqn@fM{ z@Qa-nO4KPuOxZ;Gr@qVYER{8J;BP5qZ{f|@S${!k(^eQV$=_fgjL{ADY#?lDd!Z81 zZ!?e;X{Wq1%3s*_`#x=(E|x4MyeV5kWpzSj^Pf6ld%-rx)!*(5`Aj1FjBNCM{ zyMF=WZ}2S+vOWXrmw(bO6VZNWB>i_9WkT8gANn@uDX9Id^|zE1MwayH_-5cyz~FHp zjI8KSsbn)L>Eo=JZDC+P5e$<47zIR=be5b_eici0AKw1-MP7exqT-34wl>`j^$^j% zDbJLgrttR5$EN?vQC>}JUDEclB$R^3!GC2AgGC4frc1;>HYIO=(U%FfUEdMeRAI0d zNoy&Ic8~tnl534;+8Ri=L?YSGR9uiz2IFI%tqU-?sbq>Cq2uJpC{K^EAx&X?x!P=l}Kp{y!dl zH~sIAIKBU0KTmDhZ2zIg?)~T%iqm(S~Q+M4@+lvCW4>G#cYFY{vhj}wmnmvKn{ zV;-9SC5xx{KM)XG`p1L(FMmlq-jDSEmf;ss{}b=Xi~3K#Cy=8r0zAL}#25Zo|6mJ1 zUM+HvwEO!D+))#@RH z7LiBdR=rfdZej&{A3}@2iDW^>4}0|$o3`0*EfV(|=wxZQ-%CA`$9Qa2Qc&9fLh%x9UXRXKZY}kUOXhsT4_Q=CUPb zG6Tg-_7AOxU2zTrSj8nBvjBBp9>!DGUN2sFR4PZ!IBl`*a)4rP5@7=w~K`X)#;WA%F2MeHLr1b8s45B(rzB zU%*_`&01mi{8sevyb)N^nW`TI5ql|}_t9p`v3oC1`rba-5qfFngxRexu7}%R**EoG zv(LyUx2?&hA$lBkve^^Nq%QkmSJH@hgCI|!0A8ou8}17k#MoVg4pkBSbjMF^w&`JF zJa$F^l{=if=#+_w^FMoidi8m;Um>K6Q%V$2IVpWiCgE!A^=}J!Rv#pV?grS zmirtRRgA)wZJ^6lrlxgSWP~IUFDB0D;B;s`r~kV6Mx7e17<#jbJvJ+C3==Vc84QV zX`XgV(k?Mad#b=8lxy_qT?^m9^s#F=O1NYm1q+amZeNT&v@Viy1saoG4lH)XPivDd z!6Fm)BUjfODbegReWJbYbkY*>De#(kXRnu} zyhJU9HO2j|J1<;W8R~uO49j@>ZrC;a;OvEgwSN;)E244BDFI&h3--!{qmC7>ap0l{ zq3;93>-r%N(t_l@`|ZqPSbjOYqb`&-Ztcw2q2325y${J2y4-%vj4WnjhUbz_?E3Io+HSV-`=-(lji(ypqsuh4U?M$K`Np;H|`uNhdm2{npkmYCK)H zhJVR?t?uIfw5v|+5pD`yGAvSMTFT8@ECyDvl{ML%4Jgahs?*}YM3>ak*3K;E>Jl3D zR9%b*aG%Eg=%qK8TBA4!jVbQBqwe|X+28`)ue6ZVvg8Okoa3O-M<_GmFP zzd4*hN+URFWtvAi$mT~%6=XGK41OZ(1Am+8dTKuT=k0xf9%*_C*MQW0Kq&b(eP=mv 
zd(JkmySXafy?tM&mu+`apTZH;wq4P$AUMo(e44HuV2^WE3<<#;QhbcC>vmI9?XK?J zSh(JHUtgSUw-RIYV0jBgHb)f)V04n)$_#~5qQB1UR-L$d@9@b=!*I=FUQ@Gcn}2rt zL{Tk&r*EpZ;UqjO_MA;2tdAxZ=ayk&INWZL^(wNMk}FCXS?c!Elw753f#LQQjwqJ^ zXQm*V-t2HcJKoEq%Yw()r+vW=?<$o>msW^ZX=F#dvP>y3`@?xIO+LjP5oCvK;TPx3 zh)d3$`aP)6&4p~DlV)ahDpx`zRe!WRXrGnaycCLi5Sh5aHS;Uag?bd zWJX0^II%F)k?Sj8<-mDb*gCU-qe)gfJ;;&KU~tB&z%t4U_qZA~EYkNeDSwx_-#z?Q zH>Vxv>MKe1{%i9{;vUXMobgt_UA^?4$ue6!xnhfRkGWb;r|XHz#bc1*ahc$87RBOu zz--B8us!H~gFCH(Jkl6Rq=ZB=R4p;>5K;Igsn}bvjF{_jL|V_o&e3BSF15cMi_l8F zmpVn(7#>y5x5vJfwaMk>mw%OxoaD7^eLDvI^Wd8GD9HLVSXU8WI*PIDlU;&MsDx^_ zr({H7fajuhsH^qyEZ-3y{)+hN}6Yy?N zt_2o1fu+ocxScP;+1NC@wPnbQmgc_VeIaBP#WsYxoqQ-N6XdaI3_FL6P-K{Q| zDSg6sW+~IlZ-qxK1%IAFFIp1P_dsI|g*@Y%=k)Z{TdMtn=VB3iI*}>R8FN?#RpLrS z)b*`-XPr|}j`<6xx-tncc5NBbehQJYDmOuy;^b-=B4c-^RR%=R7$N(Tn$0p)vOS!r z;do?Joi>bm8bixCJorW~XT8CTs|l{BL{I5JPPUTwsz|(bqJI;$Vt@5UXW;4?C)$`^ zI_9Nd$C;FdNKS#$_ITjNB$&yrsGJioPm?LNwYrC0&L#q$dbeJU%&5`llZxN&Y@VPwN^4wZ$zCIVo0 zY#?O$1Or`cx$C0b)B5z<`I_YK4QrxFZfvAf}D6(A!c26 z5U(cDrGJBSr^}qPJD?mxuQw&FlJ6F~gOS^W$8bntv%SY-yoc9hk6P57EA z^jx`_c1&nnvDWU-i{Y*W>fRA6EZoh)?!amKKV)?5xU4>(YKeyxTpWEf^KE-?Oo6&8$guac>!fdG9 z_Od;?Hx;~YH1?^Tj!`(LTh+UwP}ag4xdl83f$SAz5{0DsN^6sdHQv)E@AVS#peMu? z8h`7CTWPb$!TQj7TrhJ4>4u*GS*pT zFX+!E8P(yuUI_Cnh z-j?szJHbzGUUIl=*iKj&&g^j0^72+$#Yk9ZMy;I}E2rpn?e}sfpUwrE$zh>Pta6Fc z!?I=STP0Njo#_rGtyNiXHEwc<4oODTQAGlEq}si zAU1Nz-Ui9=gw=L}=8$_A6JIXOyG$S4rG_!cBA+1+atirPIvLn;%kTGSQajP+0)>Cn zZ%@|jN+)D}FX!!gX5;Hzq=RMSqp`l@x``tB<*dstOdROWN6~Wbb{%z%YFb3mL|~;8 zYFI8xVm-Xw&^q^Wt^7$!nJ5=us(-~$!tMDybiruL<2jzNC@h6mec~{kYZ%PdO;L-g zG>$CShZnP+-wYMiIJ=}lcv(uLdMZR*!yvwT>VJB<^0S@WMCD4u zxGPP3dGo|1-{7JqcL^MxX0^3D;V9Dv)--5 zlpU(G5*Q50VNdrZ@j>p5r>yL@_ha{%HFhwiFbqvnFhytjMluhLeQUX3jN&n}{FQhX zmy{-6ub1OZ%r(9B?|)_HlgBtP7*`q&2eSL~5Wd}Y60N6FP7mDESDH!5dOD{XMlpN! 
ze8$J|QcS_NsC$Z84)H$dyCzBe}1&+*j=_my*gpF-F=u=^pzZChr*ZOP7CbJ>O z{MhK*n!29pDb(fCHPnC1sVt>Qq)(bJSExzw@>Yh~j&4h}myj7uJ@X$1De-6ZV2rfX z4f_4y`O__<3_bLpnppJ5W;L>KMBt{7y^P57&R6%z5(vL%1h<%O1__eDKbt-DQ+ z*P}2Jyta5in0VrFg%z4;9@3O8jigbNvWUswvj+EyRS1g8>gXX1)L3c{M9yWY1&0R2i z%Xy24z`)|NKRJKT1Dn$nn`Gwo5O*UD_ZPm7_wHFA>4%-m;-kK3gdtmVu8hj~q~Sy8 z`rhF@Dc7&jd&~0a@noBRZP$D`E<3fK?f#*iuBpsE{orA#qqU#fJiXfMLOGWp=6Wo{ zt2Y{Y&Pn~wxbTfT_HLFN7<|<~os)wSONOT`*Uq8ab1%UbMYqrSa{4;dL0EqACiSP zHm-lT6Zg&a=a)4+Z0y<-;WOMSsTAi~6|M(St3g%b7Q8F2W%zHe_f^K?orQdIkGrO~ zm-C|EH3i*3_;l-~x5*7_o1ylqDmU}|F%(aN)W*;uvsS1RZg1n`UX16*JiRlswiEX; z2qL}DN3va(Gyg6XdrujXhmO6c)w3tJO~`+4u5h&;1M3covG+B%2rug5=9lX>L^*%x zN5|dvl&Hq*J-~`77WeY3wP(SM6y<2A%l`6T{AOy@d(lLe+1Gb!6NPlVUv!tzD|hs- zyVWQ;UagcmAH2HhcS<}9S>d;aYeRIefY6PA4#|!=_ue?rc~m zC-EBJ-pSh+guY|qxUBdY=v=;UM4*LUvWrfda*O{Ib)h83etuV8ovohe>c)kSOWA5( z-dkEu^xFDb4dRif&m01CnGs%uDC>1`)O{R#YQa}izZT7`dd*5-Oc zw!H|H7C$+e8=jX#IyKkwYMx_3rJikW1t%tWQ^Rz*af)%1mU(Vo{vCT0Wok!P`ivb5 zf)pfaKOU3Y+YAm?(#uBD+}NZ!TucyiF`MY%{3>VSUHPPaosM)$vF?@}ZTDzRnM{>^ z$EWMFSh$yeJsmOp6oA7}P)UFH$`n?QtXdHuKZwc@-3SNQitRmJior4RX1d-gIAl$w z%(tu-HtfPZw&C32S~hf}+ZRz%y86Yrxx38a>&}^s5t4MHx!BeU21|yO(#cMo3Ar}D zm*+5&?Lu_*;|i5bb^Qk=52G|(>?7;ro9{v)+K_fO_sb zUGK`n0=>4X1vywtifdR0+>6r8N0rg3rdYN?lJYQmF17!5W#UDgC;XgdQ@g8an)pgb zuXUvCbBO|);J^(G7lXl-N3ws zXF0!)jAbb_wZ+j)9+uZmkuUK8tzILCw=#`K4z^JQ8?#X8&OM&#dl%*<`RMNQS$S33 z9Y5jAGahWg(q6<?iET&&KL)QYda`1Au%bR6Qr@w%0QgY9mNz9EEcLeG zwG>{n^jKeA_hx@R=x3x&?n959jUZmPQ@*K$x4#zmwVj4z9~{J!kUg@yzhF+0?sg{B z2blN9EmHX=Xm4)ZFK}{G1+kEISKQOvYyTWN%XC7V8L& zcSep`ootP1KEm}@%B7IaW;%s^%Tuk~`FKEtH**f6(0qTz`rxKyMAKJFWVHh~*wXv( zeiFu*B(n4flt-pU2Bp2~yZvnQUL;*`SMsWuETev&Oo;38qZZvpQX?^zmxJeBROX%- z;v`jTU|!uy!R_684lzgF0zQ3KGc(qujD7-z`yO?jDLSW#YKexr^!`fN&eOcjd724| zQcP0b^ACSR%&{gJtx)Fk{ozg%?6d9UA6nM9FeSH6#EH7#c|uih&8=o%xo`8WYhU}b zyZ8>snzieIJCa1eoSP-wR&w9%TM;r)Z}!I<%vjm3bPf~B8q>q#-i~(z%L;n#OmPcN z>O3Yd(369Nzauwh;trBleqathVOXA2J~(&}DdB$sg?F}J4X~c*$;PL!S=y6UlIz5{ zBJK9g_M#lRA*myrPSR_@o~;fN+MH@`e&`32NX|@jx|}P*SDPIZbyWqwpifGYhn}k6BRz6ngH=4c@?9)O^OkpA7p_X$V#^+{g7?_2 zFRM4y_ARykAhH*)iS^-LH24(SOZs$i@_~P6JMYM3SQ(EiDAS{c=`I&hi2{T33lleN zm9Gq~8S^w;ceXSn$EEu$>60mpXK^647pzro^};wkQ|2WKOOdFf_CD5LT}12c>`r$* znxml{PPiU072RU8cP3tyZM-Qct;ZwF4t&;BD$}U$<-ok#VZE!bDF_3bR0ymcrbB=J zI%Y%wVy_zo>$uVivlg$&!Bj^l^>`eV<6AZF(JAdF5c0F3d(?Tk@vjcU5AS9TkJ(L5 z4taK>8=q9tQ|Ir(sp^7oZ)S0oV~1HIQ}<#2tZ%$_o5|3c}Ie48(D?y^g# z?}u0-p3^nPv6UsA!-*je_k0{LUMhdl%D$-%Td0dx4+eQ?aM=!4?_l2T!$#hPt_TDJ zdSOH~w65!KX-$S&%lK7j*6MIE(>IZCex@`>tW?dzHj`YVk_30^F?qGCOI|Z~Ky+!^ zDWMsYb1V*o-S0;#2KVFp`Y=cFi4Sr=9R~l_JUZXjs;8OXt}SzY-}9i?9wvWQ0poef z4(Z8tw>D45L)(gNq7>3iq2jKMvL5(CG$>UTlXpflxNu6zHTJLGNod}bwdB3=j?~Mo z&2TXBRqN888B;|4oPle)yQrTZc1Iu27l$UVXSXIWpAjRBr{#LOg?J6cO~L7^R}l$(Fl`|X;OF3dGpc1e;#^Sx9)aq*0a#8TGDj{ac`=+c14 zXVUz-l%E&pp>@%UD&Md`4DR&r|2$>t0|#b5{m#*Z&}W=6_tM~zF$Xu^}`}D zbB+4i)iQw8o()kn)rd}-NTadbVV=r#@zseL!A(BUM26N9k}X4j=_)VaQ1MrHia4AC zgi0&1hTjIC&>^n=z1!yN{ku5QaG!Wo%Aen&4LO88fIMTZGs8oL+&OIK7>1foBIP8@ zl$=HKF!n`$TnMUeg(QEugat{DmsbyG==UAAU@2Mz`ZN0wZ{afvCKIT!qJ2JAm)%^H z=u-{ZNghqAUtwD2`-V5ISSs$hRv-S17g@>RQ4qvU?ILshxvR?U>J)Inafapf%U-Ef zJ{?DHX~}KvU>WjWs}ffgT`sI3wst}9L4;KvXh#^5I-0~IG1GrrQqb3I1jNbhh?(O2 zSr^#)`z>MSYC08qv6Z-jrxdS6J7!^?->fEm38c2H8fl*J!}Y#d_dy4=5m5SLy+zWq zjk!JaIP}w(*vBt%#fQXkLHK#)5J!cZxxFYUFHz4?H48jSoc(_Bvv~z#v8nA2meOzU z(FvQS*ZdTWQ!alFUTT^|<9tGYt(%8cKb;nZ22LKZeT&!i=NpjiJ5S2DPgy*pK*_6i z!>M}`OKH+caX+Ep*4dOEuL=v~n~OI88f$Kp%0-l+Vt1C;YUdEG5iHo2sB$&T%nwJO zd&R~UOVTR<5>WF5p+Y2+iW~VAz%oZX*_w6hyKkq&-8z3%*!zISvy{_i7Lkh|B zC-_Y1#fAkRUNqH*X1caGx(w4(COC2!4{8rQeaFjpESf9wn*W`ZpB7=|o*AxxlgI%) 
zN7?FwMPGm1_ZNR+n4egApbC}D(Cg4APPz&8t^w$&LPD$=v3EkNpd^6?tFz>ui|#en zbyWKb;f@+)rD`t&zz|P%y*fd1d&+v;YcF}5R5^83&6{+2_nz_Q13qpYiBs)1>Iu3P z@SfWIfMcM}z7N12;oU6D%?1GumO-zdEdiicYfgVJuD2k&HXU8TzP&m0e2gc{jHIOI z2;anhU6&p@Qw5N_oC7<3a^5F42`tV@+SI;F16E#xIucP+I%``` zY+78sWvj@hWq3|OU^w6!QIT#r2+$yO=+rq&Xah&>nE z4sI$l1e{tNi$s$exEspIVm(D;Rf+v9c91US3$B)vkVmtfqwdf4belvIxQoKbQU?pm znhM@NP{EVS)lo!JfkgqswV5!7aMp;XP+osOFjK%e+tTVtB3MtL!V~mb;?pklLr+Tt zT(D;^Gub#7az;rpn7h2=c%%;U(6VI)65+OVpiO@83Y5BwEEadoI!P_nu-3sH>N-F^NO`H)Ll%wG4v;QWt+r z!*o>1+j$fp%f7yB(5=PDvt&Gw{%!!Xuy#z;7uV5puub}nFc(}sH#c5;(&7%q)sdu? ztPG1tWc2_AUvJ9?Ow68d^(h;^Q|GC{0NkCQydFw#+82oW0MhOBo|{W~K&w`buC9}{B+HcP1t&W3=ufId$bfz#IR7s0jQfJlIG zSnpJMi+B(7bR0^-m^Bh6Tlb3og!6uOylABha6Q~&~;4$AG*JSeJ)2M&gKilal z>BkFCZ`^Mf4RIVW_>!>q=Tk9~>E0!Q)2VmoS*R#YN4`fQgt;EOsS->(bjwvdCUHSe zQkJElFfq>bQ&BhkBpN^$j5L0+{$*ouYfxA~=U<|^YgV+hpEYmvj+hrP0JbcWOw zgUYcfC?7Tkit7ydB9CtdAP|2oE*+Ogc__Ki@qm=8MK3D9sqRNy7h`?@NdSICNHER} z1+vhY;-jvm*dC>9)h?ky3uQyH)`v6T8JP}QdJi#X1av{6>S{L^U92{hs)c3TM{DNh z6&*t!VB>S;7+h<^a*GA&swCrmjuy<)%M@Y*w>fxbkQ}=+o+9g(EH-~2u=JYxXjS8K z+%=3ZnLG-y&};&83fq)LsgC=vGo-9?PbH*~?w<=2A_6+wi7+si$U{S<(1vdYIxxTa zr=KU!(s%);i2+~y(%PD}j!t%gzDZgs3+aj#E5IpxIy%i&>2)sYMGkaeqf$<~Anx)r z7tqRceRU=pGdB<1olt)ZJmjJ9^G=)L2iMKoD=^Px79QXmz#bkRUvWOQs>P>7wNbSn z4m6zg`_(5kanigY4ryC0h!Hawc*4ePbwlx%maKU2-2VhX{Xo`fA-#yeYb~9wqgy zl+aOui8yh-(OEM~a`hWHfl|&bS-PzA$l`_#1CP1%<_0Hf7fY4SWZNURr>0bpt`O3?pPSrcTd+)DS=F6R^B0d{N$^td{jNHau2*0O#;NX&-YOu9D z1gAbXJ+FWANEbC4pLnzj*=cHf16vAv6Qw3T)FFq2pl<@QX*&TEk@+hCiYstho;n0X z1iEt}g(Wi<5cSPq-IPDIWc3!`MezwhDej|x3!&I-5HRegCwMp)@Ta^KJZLJNiK18y zveR+rs=k{#kBvtry9;{`|HPogpwOtW)@3VG80%iAnKw+!(0E~*mmJjX71Gi69h zlgZFSshOQ8mJ=KC-F~uq!X(X~V&D-8SHbW{D1B!oe@+O}A&!td@X))<<5x&vSS+lOcuqvuD>H;R?U6&cTd5Lt)sjc*1`z@AMTWKyC^N0<5v+dyxum@HhMz>E9l? z2b2&2`|1sEq5yF6zZ9usn{(1TXjw0Q4KSIvwx(ZCNQMQkfb()BSQoQ!W}r{U%Eqb4(g$y|#e_#}x*l~PJL=%Vhk zqGeSU;FCchgB;PpKzA%kMY*p8h-X2!nrDVp1^X+h^)hF?Naw?9zn8J*DGYyQC095R z{4vadg?GzY@6V3R&0Ak?DXn+WROTSwH~g*dZ4lAIcIEuG@vT#&-CGWKBWhd`NFLZ* znEN0c-IrbU2KGhrI_9520hAG+33$iP9h}_{@pf9UM+qG@TE{Fo!a)pN7GBTd2GbA|DS#{d6Zd&wHb-pb@H7w==GhU6s041kc{H?JX<;g~-kLe5 zHr4;;LF%S^rBCzjSF2E*onmgFmhmIvReu$R8o4C=Jh4`u#@^$x2Q_~Kpg;Qx9iYD| zRdCD(;3$vZ2Hc->z(1?&nvN z3wtG87S138ThGB(M`8ig?cm+iHKxG&%nveDAHFJ|E^1K&IMY)3#j9A0`if*P@5?O% zgR%P1zDZ7!vXLOVRTF=2H-Sobv$q})$1$F!(TAX9-zaEhs?ZXGYqArT-&(MHZy@li z{Jnk=Ao!Qi7>eSZkUb`_5uRTaLHVMVT;86b>@8bz=Q6HubQ{5yYF3{t2k#x89ngUGCd z3Op|nGpl^tG2fW4+Qj^3LrtNK-O1S&<{bolPVy`AVmi>d&sP}AGOjn zUV0No@mf~LoAeK6SDK0_*Q`@c`T7+|*aqG6h6R2AT%vyg*BxSCWb}MFXktw))mLL? 
zzxqn9@dP{iEqau$GPY9+0X)3>(sZk(F-X`Zb{6vkpK0oNROsz{*VLrmXnJ{ux(=M6 zd^6AcaGBc>IS7Cq&FV zVupeGa%^JE>(6q%8muNr!?t^&wqLe3Or_aawnTsGgF2;FgJG^SIDgZN$zg^T>BbcX zY{8^2N3CGfe>fxJkEZnbBtIDe&z0x|$H?YxPyAe3XtT2k{I?T;MWRYf9_>HpW{<{v ztYg;H+-AtD)2EZK{r7m~iFjqS#UGhbffG@gD1`8t3h}hDZ9uKV@O1G+vv8Y|PZooB znS6iHN`APKvJf#T;@&hVz8+CFyqw&=AVNmC!y&FDL8rQco*{O&G$r<(t78{%w7fvT zqzhzlP7|t_VTlMD6KM(RXVnoJ8o5}&e!K}veLj-%Zca=yjcX0e%*@UF2kb9@UmJb( zJc=UH;C;YM^HbL~mffYd2A6I~P=^M?hFX6#$?*kaRMZCv&K6fy(H19y}uWv!V?aE5CCSqtaxD zhypkSIe*{xYeXOc!=>aQb#6PV82~x&c-=3E)yx)rVwHr~(u+v#te#P}UCLwnAn<=^ zsUfl?l=5-p2UrBKY8v44rEPfg5!bMuIl>Qmrc}labMt~7ir9W1h)!+?hE!HBPA;zz zNM~A~S@Gy{ol9aIU{m9&Im=jU9qv4yq{|(*7R;1t$bIkky}PC5oWZ4-72{x6Z%EXb zN=cA}hYdW>Zr4tl**`q#}Q{vp9j%TE8w^IxtQVUkqjY(Z06__r6U4CI+aI zQL~|@BCoQ2W)MWp?$MWqdF>oOfx!nj<$Gznh8$LzoBDh9MQ|d)V!oOIoMyC~bv9S#sdf zpz=~#KiEBffj5L+o#2kaa=U2v&1tB8;^~*n}16!|);@a=U1+ z2;lXiUg{$D8!iLvzK#$zmnzCX(Vhg=yTM5Dkigu1!4pW%n#0CB9qn5bTmmwPQc|0* zc^6qi3@F$6_LjgLL2X<5sQiL?o3&hNHIe+ou)=)%-^j|B-CK5_@?w7qZZFe>iF>Q) zjD>H)=Hh2rKCn#&Xd241fmsM?ycB-HGE788v1EF$TcKnnxo#E9>=L>xWU_a0jT`Ck zTZDK{jtMK}-H#paJX`!&;yKNhdzVv9ZYazl6=>#6k&X3dCb=4WD8(9&4H^ntt(m@> zM>49Q!{gtskYel2KsSFaWFfXdnO5%M-6xo1N;?QJEw4 zWa-WocsHI*9-_*gE||vX%rOxC@$sMxBrm+;!EsY|>^rK+MIe6$-NU3Au>)ovqt8@K z_a5oalr^M~Mm_VwIB7C2h_BY~6|@;8VtHF2hgy6oPl+H}R^fw2!$V1=t{1PM7JfBy z!`(Juh{jmagv3Y;P>YRK4%wYW0rHN>e;f2@hP`Dce#AI$e$@A>4+yT!9uqFH!jXt)5 z$)?~P*f*}asQ7pn3Rjed=g&Wj{}sgwY2B8a4>Gr68{d0hcFDH*L3)ITJOcp^`#fC0 zHEo1h3@tBvVbA5K3bgKdc?z-(gGj1vzw!kHVPi~PUkrco4W+9AhS#PBbV-6x2(bDN z@vk=t;*_>aMSiJN{ZeLHLaxW?9Yg~w|=8)L26eu13eSp++ z*`wfsfTsd?+QT-%WTlL@ky&Ls*oQ!IzjEJ2#2k)p9m%Tcv74GRf?m%9&sTBZiH*vZ zDw_49!&iTW`dy>WDMN-%La*h92${#mTOn6rom!M{kaWI9etl2d@dOOvL}}F44{$_9 zfVh`4bg5ZqIV(hF9NvfmDlr);d2JWBDIX**dhK>NiyzTe+>DSy(n9N}Corw+(3z0- zb&QMW54r4kR~iGTraz>C>BO(QUg_JYbHSeI3mkug4i!p50R7zoe^>U{NZ4N~i@io6 zFK|4=CO;uVAh+2b!Md4!sEnH9)DM+;^`;^t*LA`)4-RI@>+V$-`mGlzQ=Q7aY;FN^cd9X37Z z!P026RMWyD$>u1G)*EHE|v=iLh)ET6E6R9@B|P4`}sdKHCOziq&` z%-o8+3dMfZmtkezto_|Ka$VZ~Ez69Ml+&&Ubb4>}`VSkS^GwZbA&E!=m9tW+PxaDz zb~(aMvW9|1gEHS)wVk$szIGXfd9Bw_AdY{`ivUID+cmI+`TRyf9E1Ratt$@L<0KzP zXZoBHgj>`gPTR#`8+d)?U2hI6>ZpZMyg?;^1T-HEt{Pu7&e4Q7ae@O;;OrdX;k3TF zb;PbLhB(u@n3ryI2V6RLI5?2rYmaB91F;r@ot7K{Jya-<_!qKEPwD%(HHY?tJ1c*2 zfKCrw=gS7D6mX%KF5khS7giF98xtVwP58MNdnS>zP=@5U(mH+({#4Xx&l|op9jyV+ z_dTMgQrE`wrlt#$8nYGW0gUW%8Q`3LE6wpmsjqXXnj;dsM40aTdhUW#v+Qv)A$XZ| z%ko4t20VMWmiiTgx9CJLc_`T#mKT2}1h``&zd1}@5WxQUrn$LGziaoi!4gbC;EG6> z_ZuWH#dM`7Jafn*96DAw8!F&yHTgSw?Ig$9r1im>#x|N5^Y&iwHe(BrelkeExP#)f z@O|nC$fFwtQv^g04+DFDoWBT+l)Kfc0U64wLBfE&GB0rXr?dTS%*uIbnx21Nn`Ga3 z*v5NY`I!?TMl@NN@dP!>E1xi|3Q$|(l0rCYB%$|o;&5LQ@*^xqvntj!tEnC1*1~(V z<`xK>mNlBd&UR05Gc0JS8W~;aw4lfI8|fXXKE@P>U%z&~HTD-aAUJYTv(TFLB$kqA zMg<{CEn{N+<(`7ddHBq-D>{ETw-M!a`(?N}zw#?D^^TMcMTKTAkA&ab@?xyrhu(&! 
ztNnl~G$HZ7O^7Y!8_2}8FN#IFD6!4EVK!sm@SVVUr2T^N*QCdXcvW{+&M)t?p%U(3 zX-J30^mMQDmqntPbY-QErwAA&{_~Bx$EIdV{0T{!tchb_-^$I1f@*(sJfM-5r zVH4eyNy>MFBsk=F35*p*DC5g;8zgjn%paR1CgGAbC9=P!EG3sV%EvN&=>83I+VSDw zQFo%uxewSVjwhdQw<3R&M4luKKN)6`k8r~UF8V)k7*cS-MZ*4RErbV;CTRQL%EHciKV71xSZEJajg$$?1Ot5GU^*8;IyUFDKYS zzfsO}>e@$3hR)8J4hY1*#R#1~S}?$rNlW0qb@_a7U;9vkwrjqDJDG|gRVt*d^!A?E zODIZV7q3a$W_#wCr+_mFnB5Aar9z>YwMoB76p5ZG!=je9rW^4%9x#AP4AMui(d;%b z;L*p_zwpf3o}Pbe!m$R*@?}<3v}1%%P|x%_ac}psU?Q(wOQ6SRb~6bn66gL+(T#g( z$1c~ZPr@pc=YzfpV}8~p^#Gox*@???3xUD;JHXp4nUuUixj8HARFkZJc>vZopEyf3 z?OKFVRq)u!@41oU+VVP!^~+hl(i=|a)!?9lK1-Ctdk%2|JrvgKwUz=xx zgW9exy8YexFo%B#f9ga}y9kBi8#Pzxb`Q>B#dK7i>4=k--X@)yD?7*^l0?q+O^M!D zSyO|_JX1Uh72xR3zH#2&wa-BYz~IID9*x=2cl&>>^`(l?z~o)uS?!p#jFUlC?>D8v7%+J+x9uQS9;rWgyLTfDx$p#GkCA(!MuN-M=r= zVVjYLZJzOwwon&v{K|SlD4bU{sCtXkBN*v>c+mPUbo`J-{7#dN^F3myE&Scr4>-!i zXGed#t9D-gfPNRznPk3>7VfBWAupoW(O3aotY?jGjlEX6W5mMh-1=rtXQmgnYtpMc zVq#?uCL$xxgL=|+FpCJ=6}ky%_a9vn76lYaA!QQNoUW|pTH zDnPBx+7us>&TBVzjA% z8I7Kv*`KY10w_bTTz)hq^-O*iCU}3BYP~B9`)RBIE-qjZha6?o-^&3@pCD2Ym`MRa zkC+76Aw^i`LVXW*)~-NJ>)tyc(fMTKFTcB(Jfk{JfhFgZ$RgP(Td8CO6uZ3>rDYq$ z);ryS&$uiEOH?fZu~+`_u93;58Nd_3{E@0_KA~H>2WhRXlLCN@q`kHC=KFv7f>$)q z5!Q9`LNJWt=vhGtWoOg=X%c}FW1F# z!1oa3vT*QCx<-ABftKuRhIP5B%-*g$#^32`8^0|-8Aw(m|PliX?z^K^O z$ie54M-CBZL&2oa==sk zzyi3Jz%qEl-Jf&W@RSies3rxfBGlDFjpS$R=(~=`z7sic(bXhSBBzj@><$UrRGOlM zzdXj+yOpR`mz)QFvhN1C#=K3bcgl)3slUL;L}s-fDny}Auz&|ZC>#Q@P6YJqi=(Km9o*<^vgGe_)xwEJ!ue?$` zgG?hvvy+$c413SNf)#9d-!+&Dl$Z>C<{Rmj3|8kivyeDM1ChjPv;t1PCT!bWp<#Nw zhy)aN07Fxcu<~XB-{tg%P}1YMZHKp|*I zA{F`qa1T<^e9dupu3{n`5+f+YKRj`rj5@t;g&;e)C6J5-^103?!H=0rIdBINUq_Za zkj@Y{56ua1iE?SuQ>mq1vD$PSWZIuIC?VbXNxFX;oD5Dre4A(HPA9+bO-Zk+_)1BW zl=MMGDt_lRkw6D5+!UZ~tB}ptWoD`r@JDv@#V(H3nD|@bSN*0uVGOMI5Ks?#)P-Tu! zVETV9wnCWY*fr@$#UX*avMDTSL!Qdq$2j*&lY;LUZwf1_8nH9BRT%)2S!xOeiSIxm zw_G4e6};!}0-%ehoaJP&Ws7~3$8%6W9wvoL#k1CIuHW^bpuZ}JRdjRVSL_+VqJ5wC zB4Bsr1c-dMM)6aOfy9%qDJhZF$w)X8W-5Q(7yjfPJa5+wt|fYHT0*V3U;f@Y2UIUU z8VYb{5NZxh)sxNH*tHSqz*LY=?d&-U)`dcE3Vws)M*^7kYBv>LBOGt_x)pwJ(oMT9C) zDf@O|w;nQgjttkF_O20fIvV=*+gyJ~l-!ZH&pA)#i`5{cGn8r#G;oBI#osXH7Geo? 
zGs0A<7eIH%H{~=NIy2kVd#Z4j?$R3BmYz5@1o6UFX-D3qbc?M6^#Tkf`)TaRR){uF z-lW|a0qx1H_#mRxhU1zVy*>SOTtm_aCY@T$oKmIWL$h&<&I~Fnz>QN9YJ`8ORw5J- zBB!0#;D?BI@Y4>B8v5NEa%rIIM*lfD>&R|9`-Tl2k_V+X)%`T({WCeseyRz(JQzC& z9Q-8aktq+ZV)tRZ_-UcyWz^wGzPg%Uo@}M{D$LEU_H!9u9PEAdSD0Je%})iziIVmnON^PYt^ZVK-hJ}gXcGsRZ&`Ak|=_ZK={J_v;*m)#(1-|+yGXTVZ zRUjE0En0NPo+2mAJea__?ks>5Q^&(lsa0uES!eOr^8NyN^5`3)d7-R^f7eF zYAAvfnL~{y->8dOmY+tsnzgOPdGKcVtEmF9JD_;&UgW&+i<^yH{gjRy7(naB0fYfN znm&7~F%||@6>6#~@GLh*WDSI;3#SiFTAUPJ0%94spv##1KmYT8_`d(}yZ`6^{>T3h z<^6w|yv~0!c`yIJnY@2`{lB=&S<8Q^_YW)lPxvGJxg6NqIYfmJjU$f!+T;G*Kn(on zeDEGHX~Qx`6X-9~HU2e@{B6Qo{ls7ybFn!7)J1>k?Di+}!t}T$=}Ih`9Q`-Ge<)+_ ze?$^?f;@Qgx9i&s!`}nLutQ#35>hsE5(<9;Az=8+uMWc8Y4vQan^n_*o0uRZZ^Jiku-`4!FH8EQZb!>a#I`g0O7qVe}fNs!n#`FH(l z>Mu0@^QSAr{l|aGG`8Tf!#py|K!LvrJI%EmIvqA{Au7T?+af>SH~XWL&cFW z(R6+}mI$&*6DCYwPktU5Ij(RoS#Sn#Hv_0S4EZg^O%Q*C3sAz@ZI!LD@2az5#|KRQ za2tQ0d>zUW7~?A>P#9h~(j^fdn;c|+^o1@37F*!PAlTk~$6vWKn_IFm^UY869RBOm z`XaCv468a8UY8Sl4cAT-zqLWvGq({{D^YDZ;6quMbw5w^B+TK7Uj&VQz3Qzo>Vtf* zJmB?HLmYnqBvOFXWYX5Y9vW6x!1p>H2z{juIRYbYGl;z<66r?{WFFgSyqI6AE$2ac zJE8ya8-3aJzqIN@3?z2+I}SIZTu;(~SG8KW2dJGLo&0@?9bgA?WZ#SES_oyct}hZe|(_Hwvr&@eIrW^TTf zX`vQInQaVMx?`2NN~zfNQlGEH1bAJ)4c{`J%0(Vaa{i5v6sGJs*&DqC1;@5{Q=#T~ z#Akp1A)|p7bsPPyIoOflhUw1lny@1QSqtL<>rl;12J)`EW5~-l8o%*fG!~^r9N&0p ztInx|M%Ti(#+|9P(;1bI@wjcNs5nm&)Fw>=4JKCHaq0shc3`0=t%4^??gtQ+Tw;+j zb;xeql1W3mI?YW{Ufbv4aHLCPj15J0u+x8ZM5n%1&BNQ)ZjEEDrRIZk7tVuM^J1be zOdeuwR^)ul75$A1q(QN7!l?3B_uOy8q}B9^eh*sYt!y-;*X+Aik3O0Oi4u>#9e=v) z_k(JvIc;%s)<7Hcy)(88DIP9oxt+fosJF}{i|?!Bi77|pgOVR4hSu4}!Qpj4>V|)$ zR#tIAYZ^%-?S(m&{qt#!~Js4@lm!*%%y+6zk%#U7H6 z;+rd#{R*H{i%fuEQqtcq%1i6#>8pPWz3U!$ZS;v)G()~Wrf${;>KED;50oFG|K6Y8 z8&!692y~?|*B(D*ReQ+quOVE$c#%cuK{Lzhl|mKYe4`g(slyaZ4G z3{Aw^EClU!{1u*G14z+tIVL~4r+jkX`;b5Gs{7avob{`QH~PM`YA@L5aYPi;;7jb! z(Ouql`-?kDYF_S7uiDS;?LEC8?WhQZ$&PkKvg8XFf7l)7D~{@H>lG0yDUg*Z*6*OP z695Yu{X#RDgAvaidVcxSMNoe*xR7it6@W~#3Fs-Awbw0mU}ba-l|CpSqb5gGvepF* z%P?V_?sBm=HK8Luy<$A|`9gDLvt!#na7FXH+K=UpXcuo*$;0+(i6He>%Nhbm-YsrI zZTUJTe%8<|vFjTyIb1V(3FiGVMt!*7Mj%1iXoQZ3bBCh9eS#GbOP_z>|{m|^D4 zf<>N(`-R_c6d`_@TzP+{$zbuC7qVNe0lr!5uhWF?NTc-fNokZ?w^aF*4$6N#hDx?`0y88CUmZok z#h`V!@`-C294i!m?n=C;CxD-%BbJA?=O-?gYYC-4?QJqK}2cZsvmFi5!E%6C|z$kb{n_5 z^XV)M*!IH*szp)s0M=8}`tczrH|2H3>N$&m`pCMxhK^Rv&d();EVV&BD^OIsaG8L9X3x*Z^HNeEonZeytywWYL5<*IkE7 z9cvg9X91RfQ?YX>#u!yGWoNCOizev6IIVSYkdX4TZ8C~4r!OZjpXEL^x*j}9@mfPs zxX7CHK^A{=<#^eB=H58os5G@U%Lg^)=?xv)4@{!WBdh6 zgTIjdEpM8?b%;)y`-;DP?$h-5?AYZS&`qp}5HEk{`q|c-F@w@u45952L}z5>^)AFH)AbC zIgL*)(fOOQ>8s~)$>X5Kis6vV$`8$Y&{;|)iV3h<&&Utrw(sN$=*5J9q^&cjJop~l zg(DH-qZad)x~9(!>;Z~Q{W#?&7?oWk5jQ?M;1rj>Jqa9teJedQ-W6_slG=uW>X|_R z2L}c6c!2ez5aUljuID6Y*Va@igze9&q>)ydjWAp3wNDQoz+#?sZO~1#P-7cS@u!^*@_P8`Jg*bB`UVhc?|wP~PiA);@`(M8>C${WZ`?T? 
zB`T3jbOUSP0i6QHwEMT1T>By?0J#sg(22@o%$%5i!qxBF2bDYfg##5|EJy$pf3z#a z+kAq}EK}sBZxVgaZ-!$H@!k%_U6T)v&0`4Zp{V5?5>$u@Q%S#>O%F-cpZwI%Sf>J) zc23b@*e*N$)YG~)LhT)4sKjRlaWp@t`%S0-+5FGM6E$(2{@0T0nXRccfr?nB?;4wS zb`oZP%=E}4s^WMT4oQ{wE}M6bDzGA<(C)SRocDnZEDck(F3F)Fi zun3NHoh3k67RLAAc+{=u#9tCrkG|rzhxY(OK)k=Pm-Rmh4}Z?5qsb}3D6~bdpH$Dk zhK2Kx^x+6hpFaDZB`w^CPb2R6RpT(3@4&&ZLuV4PoPtt4xST_Wz099C0ND#fZvXo= z-<^)zOQ9gGPx7z!3)8Am9-~>~NJjUc5yUPdKpVa>7;%Sv?ZfvwX0OQjN$qKhv5EIPtCWcrP{OU0CEby3!6h8IPJ$aATlLBvtAdXYNUif5=^}=tZPpSyUpAw zZRJ8dnF;1r)A&d4Da~ec_)o@8=Qi%)GVvLp0BC zIeK#SBk)puQkMjm^V6JYL)BM0x+S0e%Ui#ik}cGaN$pi54}E|aF3_-%z(zghCZR0KqQ=mF-1tnY+4SO4xUJ-l>+MS&y zXMID25(@TGagS#j{|PaYS#Y2VO0q;h~FQ+HIp=LZ$=w#s4!g z-Gj!So_Hl>kTDclCP{+E(^T1YJ8kqh%9X`b}ATP_v?Ozk!^$4 zOiz=hpSzhjq;p|g|9;A2car#H714pBq6hS{nF%Hn_K$~>ptaVw64*T44(%&_9Oin4 zE`d& z@tZuc^D7%=cZ{x7EoWua+Y>H&$ z{GLM-CTDC;g8C*;ZlOZ>Myb8a1pCTn?0IXMYh?T()foD-UYRIxCGxkuMfAPY1*XZY zBn{IcS+^yc=8qpj(Nrz4$rpK1b$^XXSh7o@^F09MA)_R8&_~N1otlh z?AjED>q9pHXMcUocXz92Oiv*c8R!m;J02Aa^pLu6OX_FIE-fPvjg{aKh0dJrHq$!p z6k+KKffDL95SH{!@wI?@E+>7?p92)CbWv*32FP88 zVq-_c_2+EBl`%2pXU#3kpMT91S0}=Mc+x3W1vV&b`CbN5)Ar-c{}*|l46;C)h=xAe zX(NDQ1A26BdZNe!d7<)6M<547_F>t=Tu8OBb^JwTJF7^6Kt1YY=;nS8srvl%7SI6WTF%LSz>qMa~NBE(+X0Xl51Xq zRHB0uW{{&hT9*#IRR3N&`4XNZ3k_`4m&?!D*xqE_fq7mc-1kCN`n$<9W$9M*^tzTi z{z~;LwK}S;9=-rKnSZPeRCHZ$A$!X1mgL0Z#h&lj4g=ElJCt>$q8#1tHo-pC6J`!Q z&8B%3sWV=8aO{TX`HSX6{&xMw(>MculpNz$uVH=#2+<;)6*S2pB`Gg(UON$r3(^@jhRJ2`BW_%p?u@+$6kpn;>Gx6=R{;4GCAz=g)~9 zCum^r00`gfF@N7_e&UZmRYfI8?zM1Y2Lx55&;QDzi5gP0x|e6RV5e=^(?N3&fb{hHANSP5zSYBS0uyVO59w_{6>|Ca zmR_Jd4~v>5^~sk*sa*H7VDMgtJDTLP6{#l%L53KJcz@~NfoomSQ@CL1koen}3ZI1X z%iAQdI1#9qI#m&~+*0qg_*wpR5ao&kBIn5|-%nZ|-#U3yk+>WeJ5HLCvY13e7MTVB z#?kIVXryScNNAwWG&6t@<2>y2^F zL<&)l<}M|A+}nAk7VO!Xa}*=!Gd z@RMWW3x_miweBrD;Ke}{<65M$-1?vn6JQ@DAOsd-*8OH=98#H{?T#CgKVyX6%XmX3 z@4nl&p)gWM8hg_x^6>yrgKRAFoBbFB$lDiOF*Gx{Z$8XL-=VN<@(k@T| zM-2f4I77Oz+W=6|67 zU^3cp7uc`q&gXM=0sJt;U2YXX4z2y$4(|3Qez{E3gk54g)Q&9H8rT zOiA);y{5k(urz%0;5b3s4`W#+ZYweC2AdJv4M>|j%Fna?ypyd}&ZD53JanB(l9&sV zkS^}q|YY=WPRDZ5?n+PW^HVrf z3IQA`g1t`42M+UODrfHn(GyGgc0}wkp`^XH=DTFX`6zMt{Pas*_0lUpOIcW~0w?R- z1MkG=4V;CrCjh{;i?0;Qr@)9vZ<1y-G}|G0<|w`X&{(jyb&L=Ru1I2V-G4LMx?;iW zZym^vpdk(B_UvSMtwH>vZ3G5JKN00Ze}VhPkJzRwO_Y4GMj@1G>VLRejGW~fF0jHY zongXIFd$V{lOpuI!8~ zaD<|wjTu-eUQ1EW(GL+u^buc8MUD_E6dx$wL!CTyxa#Ad=pVa?0FMOI z*r;R9M__SXj1J=zg>M8H$*#L3hQ^X+FeI;t&p9z;S)gGap!A`LYp7sO&1qR$sI;~S zEfc~yjo0Wu4BYKWqEGl?D2w$7FLjX9lnIwYObID}(({pI!B~be_-!wJa+!`rHLk~d z2amT-SYR_%95>iy1J>=vcN7dzW9ds-vDK<_Sz?}Bi8A`W$`Ue&d0PU+4)?oD@ZFM2 z(l<0nIAa+DzO#;BHIE#}Q7xnG)T_LHRSk|rNx%2@I0AD>?j|_nyJrAoTPNSgf+Fzg zVZz0Kt~afAY@!cMCx4agDlMKKaPxN6l*OwPJ`Yq!B(Ycw>J&|K4!C? 
z8lIN{Edlz|ZzmpnK)w2t+x4bh+y&w7-m+x{ytS{9b-=43KhY z)pShED-axGB;;tm>_qYaxyVdEIk`}OX_pDKBxgegNu)F>y>^gK-7g#=d_GVD%IIg& zG=r7(nT|3Xc4(Ob>#(q1(!Ax~u0tIT|8c$$dc{7<3dpq{c)8x~A{wtxplMw58|%Hm z6ICmTxZJ(SJXGTQ{FKG{ne=;f3z2>EBYUB1qi^ayV(VO4jV=CUoIgtL47kdFGGx2= z9M2asmaRmtSl59V<`K7OzshmWKopz;7A$Frl(C!_-i0O}%GO@*Z0UQ|5yGx@rAY_u zU*KSf4)l$n8~VQ1UQ9x0=~>I@sl|CSp-W1~5T?_DpiwY%BO4U*Ns9x|b7JooCyBxih|OY5WGqI{Ux;Qg|rmj*8xczNb%Fq5_b(9`fELP~Wfy70AmRb|Km8jhGFeIYch zw)oc5ZGsGnY~fnHeZIKhy(|C@2Vvmt>FE0H<2SVLcE5b~8Fsw=`DO>D-KGyo+drk7 zyvZ=e=GILP8Xc?lMy>0A_X`$B6mi%AeP6a$LF5gWR5;&1{Jk!Dw`TQ&Wt#*({)wB>AZz5@?cDLvmM?<04U-k z79WaIDFv||8|j$Y#@r?cf#T~T6)t2HMsl)gyXg}#@dwv`AvcuoZGJB~QbslW zLlqKX+a5`4Z$lq*?6urZG{wk}4g79|T}lJ#$+|gZJuADVe$3UFWZKCjw2!X3dVDU!>eWXgQ zyShPDtZwAvs&G;&{*K4WDG09dR_@U9?=3C3@q#d<8?FDUgKQX zsFi29N7zH(l7{=PKY>hd<;e`ni>O~w@{FdAdrY|HW1M{P-{7RAw*u0+``k#Xsw!A(Tbr{?QVZ0j?juInjJrjP7?D8~b+Jr~VX8QMk$$rVW`%7FA(@*oVehO;6 zw`+Vmn$l~mo2v;Gk^KaQ*sha;yjQ$6Ej{$c(q^i^4;a}QdQSvWN<*U2ua{= zWCnQ|JzCCnii5_nJ!F@EcyL={cE>$tJizcp1`=brz?3Zl<@i@!$Z_=9^@?AOE1uz# zIi|FM0@8|qU`;l=EQEmgxu>dz0}X8<&q}~T2%1;}L{p7CE0NK)(XXS{*PA5U=b{}3 zxTCLMoe?bRKvo8(>}IB7*e0pgOVr#hdA`QCKTsz*ncDbOHA~8E4)8e!TlMNd>=yag zc~?m#MD@V#D0Z>^gP1v4E|^Zfu6`H?e5_nKiLIwr@% zsR>HXnpUOlFu`x%mSj`#+33)@oTfsSRky4F#N680gi19Iz4}+3eoQ!PHoB7#dBBO% z=EdARKi49n^==sF`k`&$K#~C?>g><1x`vB*Z$QHdHal5ZdV=ncxiR2Y8thPL;6#2h zH>d7@TmQ;Ww$`O{=TQeCPX~7!W(wrKVCzlFUSSYf>=L*cg=K6R>XQ*knSW2}2OTovpE;5gJ*jUV`_M{3L?y;H>Ib41_Bs5HL5$enbAJ$7#QvzuSySFe^q^n z^-By_JsWwVa8%`pv>m)kOmVXFi4m0;j4tCT-%Il=BJ%7#jwtmytBaAYm`NNv%u#Ihk!3yfdA zeZJh4&Qh_S4GKtytq$&&Vwzxk``E4LN-wud=sEA$e>RuOV2SI1MNaI25!pVnNU=bA z=sjO=n;lsbETvnUc`~BL9uVo$Mu?1mIAt;XG9y200d~L+@?Yh~FMsqaA*a7qK>ebB-QzT_ZXs!{1b@RCJCcpk15>pce)fsE)(cD5ZJg_SG0=UV5pLW+s`{pb zlT(qL^m{U0L21f@={DZ~bleTyRMyzrHogT&)~NQD)CIh!5ys;Ae!moP-rUy8U6~hW z`?%64-X$r-Y-sD=^qGNj;2oH{e-jP)ORIVPJ1xAweKbUti9h{aUlFp)1sQ6*p8+|ATluVUk6$u?84Yr zcD@XTn4}{Sd7S*bLF-3FIceS0DU@dA)1L9@LIXLUjP0VofR^H!nZc=9fA=2hl5mERAVoO$=Y5R*LG*K<9xux8+kT}{Zr((bC}q|3_PV3y zicQIal->XDXuNNy&D-yRPa`!gvN2yI0(bQiP2WE0g1c>|@-OWYFXJk5GXrHfJTUiMu5R1@0p-b&7k6%d3P)Mr z$Nyejhh|P4@{TZ;Dl)Gha65E5y3wrC!O(Co?oD|_d1Ms%BIlde2w$oVtE>yYKk!%~ zB#xkf6_e>*h{o%Je|*y)nP=riZ&^jlsetIO;4s6~a}-jnub^xQC?8&Z&f$iUI^Q|L zX8n;lZ6%48Ccp!8lS?_jz~pct{)9i*8Oj z=4Z1$h5E&N7MB(PJK^xjCwEWE2+uXf-Jl+FekhC?MXFc^nwnsWl14{*Kiy8IF5bm^ z=?6ifBCe=6ACZDb9nnF1WNvlOWjywM?>kQ$=+}~mxrOYF7bRQ2?wA(rT;KLfjFF+_ zkBBTpoz~`3e>3@0Ku;e2lKFA}z(wDgEOl)gsEl6#k+b=_yqN8<_({0g6As4x7`ASr z^L^&1AR4iB#dX0^?A@Wgw1Hh2gH4Bd`R9BL)%05OK_s7nAR;7;`1Xb+GJM11fLHScSX3-=p2NtokS$G{%tBM*> z0#GNO;Stw67R#U-&YqORnfk(bECD_6SDOb?T&_d6o~_%Uu{=z2+MD`4s2^ zFSP+&QUW1eu3K)dVC~Y@sfTz@7UOHaM=JxhEX%a&DzWitEBPs^^W$CV*CGB58Yurs z?A@CO2k)kYYhb=Ae1#DlDDfgiN_<0`Gf*LWcev8Ziv#+2}&4xoud zoa6{D<%iQT;%W8al_(?Z67_(k3)P!FDRj+;!g2IoD|%vaCVGgxR)wOopC|4kg1Cph z+ESn7T!IkSD~5f|O^=d{{+ zw4`Y}#?UX&B0o4ri{|~Hh?bIxtyC(N3J_PEBVhGm?&pRl;AbnmQCGC}zdbdNtC+1X z@-T$zMdD{=^=(93{o~RL<;^`gU=1F^fAMOOK6mMvp55YrExin>T)Hbl}k;xFFjTFKQ1K#N=z#*a#m@ z^wqyLJowH0ukcFYVzO4mqj>-WKkm?YbgAtf9GP=RA3%~bd0!EA}Wqtu%Cf1g)? zFa~To`g&8!$tL~~tmx>aRrFDy)M767QCxgUN1<3n=K#?L%Y8&_*;vxmmbIaqKtv(@9Uq zdiN86S+_APzT5#|y$llXnm}pdTYQg&d1Y1uw?bqLvN-jc$L8k~rRRDf&0r&GXtz%C zabm-|sA|rZv?;v057`FNe^PkXHFXknak-1}@H0~w@N*aZ&6w1o;>pLa)}QhA<(NV! 
zbS1QQ7&4vS=z8~G{CtI{?c+4dCDc)h`GeOwqZd`AhngYiRA&X0J|{C}t4aWou#rqd zO|K%>ui_##P1tN$scCcTeEKxz&9wWsl3o*=#O>#yaXRFvfSLrUe^r56RM!#=cY)V6 z>5!HuL*(+9q-!i0kPDxtoN5xWx%;%GIa*pYenDBSO*FfQI{7s<37|2|YSn z6jV{`iVrVWC@QJG9S$jrhLv6n`+r+Sp-Aow4%ibr~4f|>`S&SsC z;6tn3MEF@iN)Y4_tG$yUO8<^<&lN12U@pNyTPcY>f2?+HZcR4p4IPRUe~!4}bj{jg7T|5Q)8UnHe5Ew{bul}$gUjvEO;QU3V6Sp_rl6-@94>hru!XSC{ z-V`MBo00ufosku?#2rVGZ(%X82Vc44>K(<Ws*Ui{ z!M|!d%s~GpCnvAg?KEQ~RUq24Yqp&A^|qZ1e|#V*MwJ-U15el4um&Vl38+0Mce8%e z-LgBNmH;hSPN_Q0)=XxKuoXoa;ghhjl#i*~%ij^B-L8Ig3JYt{F~CY9+I-Mr_ZtII zUenaYMh$|tahgx4WbjNSqTg_=8qYrQQ~#JB5iGv0mYA9u@R!*=z-xSK?(7Gf1zSf6B4IK)+1|k$eq;}AI1@SP0_SV8+Tm2 zx+94=2ZnvVlcs+AqT-|Ay#1SYl*A?=whvHma8A`NO&4+EA#x1pb<`zjD04v#d$K-w z)KWJrhDXtwXddBF7MHO>xXelplZ?0ANQEx-(GE=#(qBL_Qy{zdYM%I@~7(xY0b{pmU8@VNnm`60RLE7)N5)cQSx zm0)^`n`y2CN0R9%C~gCq=qvhCe^hKV+rAh8fGi76ljM0 z<#z{^kIP>)B3Vn=Vt z(#(*h-wL;hnjC}E-0hnGIkS>XgMo-+_|D5Y1qHHQ<%M3Y75H^hI4_i_*@w-KLA_qTh8p_ve1ZU;-a|5>H9;XWum^8 zRYM+0EDz-fSu7B#vyWc@mji9m_aOO=$7TCKSAoIwFC!CeT@;n{7LmLuEe{ofDt7ETj*1##=?|co9+k8cp^NU~B{MxHR z%aI!#_~c@j7<4r=ZQ64f!jQ@6le#u?fJ%SW8S2M8iHzci9{Wi^iaX%tXJ9|x589Ss zp`$H4nk2iC+wZCpy7G140Cv4vgV4)r6%UrNIYCnc9MkS*!Eymd&5wSKq*W+ze}Fg& zZgv2a1`P(V4^rdg1eM>+Rc|>eCQqQ3+c+YU7`z>kikZL@0zi$0FO6|qZv5!|ZbniS zD5{rwQ$NUqUJc-8Y#3!`3QbkN#M=qO-X?N=Dgu+l4vjGh8&om8WIv|eu)!y?x+y`G zlm>YSy2h#LpL_#K(npvA>?GMQf7_IG4QC0OgF;utG+>C7h-d|@*)*y|!eD)GW^5j*0n@iRqPd|H-zpyatt*;NbEQ<4uYNnyq?K;tOL}9 zqSRaB=|~7A==D>dxJ7vY)!tDUf1Y7aMAsV^s$~?yyRSK9)b0Z#+weQQf1n_@f{b?W zM_9sH`-3&ot2K?c=||6N@nY~3<(DOY^UhD(bh^0d#O;_^nj%@qI!kK=COy6~3M;E7 zQJ#UK`<5UyHz0$e4=IyiA{uDfd_p6*ISFDPUX6Jj)2)sOmo=-MFYOSjaeRbv?CePq z7}ta8yR?*iX+Hr1-wW<&f98*X`7d(X1Hx-}z1^+O`^lhVA2)f0K*$=V`|N&?BU; zE+dF7Wtd7o8D6giesp@8jhejl%>$(bx_dc@AaR18z9}u8H;HFD;A!?5|^vi19aL5lC;^r zm{9-a;Cdm;y;>r}eU6%yyqNP& zPS|DBT+4~ffxdHh!*qIWCQx1~stHXCVX1E|-`#4&^UOGAf6wa`5eopf5L?iK>b0Q5 z1(6)F3vp4koC{f;OzJS1=n8DNA~dYiaP?mU4^qhE$=)*PvR)lI<5M1DXm|E^j{8tz zA7P-}s8076rsTknaBM#kAhhPQVRn*@St17XxZw!Rs_t1Myu`CbJQ2 zaukmrfE#LNU*6N;&h;;|P_$#L4XAWn3k_wl`6gQBx5B#IY^1CE=NU`Jv<83$w&>;8 z@0x-4Q2xTMIy7|0PjsS;2%d^it|czoeYhw&o#N9Qe;(z2dMK|l$cLJWU0w#1T)uIc&1gt3A`qxxa;w&ER%w$vSR7 zbO;%1a1=#1h889eo2h`sX@PQafviTTIaB-YIjw3AT!b^9^gtbcb>2W)eBawXzdl&O zTA(~Le-jGq^b9~Tntd_+n{OWea?b2J+dBe65k>|l`l8G##q)+8t1C z#(-06lICV06A1)njwVsTCpyxOI&|1!J%ndU|M{0bH2YyCyXPh-PEVt ze@@xVG^Jp&s4J$qCdR}4baEg|ipB~{VjHXdf4cf@X$m2TPEt3cf%40vrsRCqWmtL; z%#Gj)<(G*u$m4{7vQD8^ZR|uz{p+f^YGo&-%s)pe#*g1uRRu`Q)N7#KIT>9OL#FPc5C0pVGqwhV4drv^Ls z+PB_tLW;hO_k{yT3q zBXsiK=ineGizAC2|AeqTu#W0iGWKELrNvsEcl33|yjg8|tSVhc%c2n8ii9Cze=56x z%Z)xKJ;KL58^!&L>qIAV-~WEKZ@#RDMVN4s31|U)k}Ud1Y-zDknNt0% zgLs3o6tfc@;)ME+y!>$+6cm9Bq~7owu#WhDd%5HNfx-YIH8R8-WUzy2Md-D~ZA z&N%OLvBv6d5wTJqbIvNrWo{)rFKp1`qi?2`5B=~8mBQR8MJoXXoMbF`?uWT!7Y?fR*$mw8xo6^~Q zahT}V7U|Eu;XhXc#b0TewRnzqdok%x&B<)(+;^~;J@xHtzAJ5Wf7wSXW&K$6rd)d4 z(^zx2FKznF9!4-`HVzDO9H)cxOUHgMo<=l`H#`4kO!L!BFHg~Kv3p#C$4pPoy=cK- zim50wdK|6aJ%5oAin77TUzhJ{VLkGUzv%Ojg?_S}$usosZ1ilLLY*we`3ubFF&RDZ zVb~jn!&4a!z5Gb&47nR$CdJpRawk zm@RL;iIeYmF`TKP8#T|J^(csCKQE`t;AzulIAMdw26l`-Ry8trwul-q{9>Xlb^Yib zAFl(SJW_P4OtHn8+UM)>VL#7bV!d9HV{jR}DPGmbi(T`>e`9i~&8gR~xFjh~ALpe| z@A#w+V)rzyFQi(n2Skd^v6SfI9pn3IzddK1s!SP8r|0Qw`+NqP&KrFm^hjyYwS8uB zKAa7*H}m;apE?&?@@FK^-gEnW=39O8Ses%tzxI4E#}Mr&wfP)BVKko8mxmtxKwqv_ z`pP#ovRNjBe`Rnl*|M1;-g2R!#ei z@3!P5`2Fl z74MbwciGfSjYoV_`@?Crdmj3Sy|x|CHS7H<7bp37SY!9ZM1j|T9_rXVr7#8BJpys3 z3;FwGM_#m&2(Wk zfA#)UfJIHx-Z5L!HO>2>qf=QOijZ@gC}k7|Biqf&8D4^2lGCKvR(6k8^Wrddmi1*A zR*qA`H7%}=bIY#NqV^p{y&dq=cXd*4!uQl&u!s4Y>|9wra~&b(XU#4$7i_~D^-bsO 
zl)fIF>B)CJ^xo^KxJ#*NGY)g;iU0bPf6abx*+(Lpz_a`=8vS|xraCN!9Msp22Z1rw z8ZBNZx9e%Nxu1qpeNk<)-B5ZJyGFPB=`l~u^|-(4+kQ4@Z?DvD@$xO-q2?_8wesFx zw!Md*O;FL#(nNWiJwB0Qs)nM*^P{dtILqUQe>Bzc zbRND=&A}>sU%u~yakPwj-Zq@e?eMC&+aNV3_hwp!yT^sC*FCBz=9DwvX=azFiH_qb zxooO(@o?VvIx$b#b3H!ZF0<|9+&GmPcv+$cLyJbM=is7`CdnxsEsXQRUn|yq=4L}h zkJd(?jZUj`+}q}QW6Z-yS!ZGNf3~)F8%-l;W3A>B?Rl`q`Z}f2rdpg5Z{2vxsT@D+ z+*hpr{_M8O_;NYhY+$||IgL?kNwRLP)A`gKUFCDNdrt?K=cOk@V`dxUXLL80{+i78 z>N1rxi2N(LZG+x@Zw#tQ`<(u{r_HxJW1(E!{&HB=>MCQ;)bR50RPnXdf82x1`uW0v zx$y_8wqBd4Ap)txlP6Xt620eXe+W%ywVn1aeeJfeR1_o#QHh6hxnF^GSf@i&?Ac4n zRWlK(dBKW!vv~k(mRdSpvj`emQ4^^f1?2cKV%8W=r`i$H*F{C^TC@9DO{>dAW6y@C z=KOXzQFEfd{X5`GkeWb1f8VKOSws7ITPlwW2m+wNd8FK|QMAmb)w#zr|1zVtUOncI z=}FxlregYB7*BH@i0x#4M=yXY==FTC8ShHPMi=aRzK|;25*xEkFU%3zRL#LG>$DkI z561QLC6IP9CxsX`RGE@Ta(#N&`+a@guW$SHMf9%pJ{^vVX=dRJe>M>3rgOG~HC;qn zW~_|Cvk2|iWgBTK^se$g9**j1Hhm?}on~K_PoEpn^z8WQ_$}D5PNT$KWSkCf6XSFk zobH3;b5NJ=F1*giYoe9b^R-{?`%CM}d!c^k{Kk(1j=+>&dMrEp+>Fg~cfT_|A6_5H zE-{=noU+5sOLvYke^cCM%7>%V#;_SpsIpT=3-30&s{MInOJ7zRpY|W=Rmu82Wzk=|%Tg?kD+g%2IE|GY z8_Gn#R1S?~rLwUd4le#GUn=uSb{mAob!yg>zqhur4>m`4f4e)3Y4k7E7K* z!%FWDH5U&IZK`N%Iy+Xo^pQhkFHY?+L*fh=IxP5bu>~X0i(aOI* zq@S(!b-mj-(cUF{2?R~I$WRDuxP2IL9jUc*r8b_2Hrp?6Zu6dA(%VeAau$=r zB@G|t$-d(43=LID*5-P+Ico3yiC=q06P(-wx)cRIe=gbaQ4?jp+28VGaCjhBvKtuh z%`7{fNYR_c`MZwag_x#Ox>}4Lm&rh}*O+gra4ghWb$0}x+V>_SfRgv!so_?@T#zNy z`c{-9o{168HfY~!r4 z=dz`3fBX8Rbchica^YR`h>yocUhcE1c%AMkU7I^1kB`;Mnl28yTpGqrfO?aV z{vhgfw5q)EbhuXBz$s#*plq?qQrn7Y2Bpe3eC%Hj+v)yjR~yOa*)HJwvmsdQbQBkQ* z=O@p`kH`I;H9p-3w#P}nPTX0vb#|M3dUWF#amcd>Ctf-U3bS0UZ`YC+*?qD*jOQWZ zGwYJphqB!G`CgyCR}b@q&{5q~4>o}O)R5zGsc!ni$3|C+hj}vVp_uMsdPZ8YY2JHh ze-S=uy_q~p>Dh6(vX0hj5ih)P5(a&D_qttHqvPJ@tloslv~RMD?W9M~(^WLn7HAgR zX>UY(exFTjDWhCj^~Y?*d70I(z7ei*Z*_##SUGCOgXq^sjnoCZ;Gpm3YB+Bn4_2>a zvB&RzFwfH{XT9TDJMD3E#;EV+m2=#hf3SDr%aloeU#}nIOBgmXV$*9M2Zyv@%@5-R zkfD09>(}Wr*HX71%3-8Lqv{@&E7vNuYc(R7a_~23;uo zMkscTqUe-9&Z9LB&S`d>?AIE^Uhi^k?!B?*#t^-;p?Tfuce&)Hdd22qokp4Nn&o>i zT@qUCrtU1{>God4@%Ek_GsQOSf5{l{x90Xh#krh^@#vcELUxj2l-rkM4v(;)& zsduzu&0*orHyRG_lJK$#fiRvY1Yw0=a-!SH1tRb{#zJi=f)9t3_ee=#Og{+yt! 
zS-oyratljGUu@sRoZa^9oMCIc#=*h9CH`S|yYybuOTM{G-j94^EQ3X1;QlJXQ5L)R zsu^VIJGjDQyIz>y887JJWuDpeoUoOavw8Sl>u*6Wsi@Z3WIX9#S1#iB;j#3zmr^VJ zbyclmC8~}EpIyr-E1E=^e+*yO0Ke0Dvgz^L-58GT?eY<2Ykt`gzD%axWrV!G%!IdB z@Swj>&pkaDoD+6a%lqnSWRqyevU*qY(T1h3$;}!pp9ZM?+4NRDl%Yd}V9w}#MQ+Nj ztOhq5ZIWg^dkwPv-ng4X?jPg4?)Ojri0oq+8+iZ|BV!JPUcfm`e-kd!s&@yLv~)AcJEsNtHGr_2SdkB7qHrKbc^Pf(S_4lvtLrI4~Ox3kna5C z5jRbvnA7ETf+HuKf4wq(>iaj_RQ>I4WsaTW@}R%`$mQc{LC(kiC40ESJYOWdhw5OH zHjAygrCR@4?XtNt_pEco{PPv8_YCXCaJVTSiy3>mn!ddC^uwsH=;woqyomIzs($7dQW)4mRd}6oZQTT7 zJnhk0sMVML!`M%F;0|H|#qdrak#jV!hBjRZBL!6KjrlD&YR_B0ZU*Y1u}0oLz*o?{ ziT4}`r-?{)f4?=0;j|2neRi4OTxGd6&u{VA&I6$oEV;~~7=vSBXWZiNIp`r5rCAyv z{m4Q3v)|Wtf$qi7-oGx-(c`K0Pj1>+&vRd$YTJdSZeldWo}4$ZmxX~ocsV^Gp_;r( zPw14IQI1@?gvccCObB=cXhc1AjDKpVfZ1l9ulF7+n z>B+GlbD2hCllvms*!&O{G<_UUx|tPRvKD)|Bs(MfuD{&6_AX94cu7qo=%fs+_AJjvpg?`|U1Ix6=i}QAq zouhkre|5CbAO*i%&*OQ|c&n$p_H%b$+{ksxncds2UA)-Y_2jl9qs2Bkm*rI7-?sJO z_H?+Stq+4`?=XFr*HAmsVg@h9*0o{}q9jxgiE^xuscg>NGw69$xGG?4qe=J;K|4oG*J>U@ZN; z)6@F#NPkTGn!7kpSC8ogIb_gp2M{9n`8XGGc;7L{)>ebnb#a`Y@;IlNvaX};onHoO ze{4>TMtxfko6h!+{&iCt!Zj6gKCCy))T@}bIsx>itNXCm-`sD`I23~&9))LfohK-` zUMg~W8`shL?my#8@|Z8rqlq2}W#tt#Ud+vCC6v|n$`;G{$xW`q^uAbcgL=5xo`&gc zx7+i~w|JTPBkTHhOZPY#XOEF}dsm(ne+?$u?m2(RN%kC#rdsdGo;H?+Q=|_XmPY_g zK(oI$WH);<%WPRZa1JH_rlSXWoc3qbK5s^P+wxY9hUa|bZ*Vcr+7Wo31<_zUEEeZ_ zxZFZejuC$>dAVTc=%S3bJu5sep1b{AM4BIuHpXs!93F*g3b<55_6|*@el6l{6@}6D z{eLJnr*W~syLuhlYb7_=Wm%k*S+P0V4n2CrK6+rE?s9dwaK6Y@w7=R}4`yz#j>Q0POpH0PSDXPHsY+Sh!=~{qla5&I=NHQz{v!X^UPZI+&6GUdBf)(~F9e9Y2yhA!- z2Nkn-s(+42541=JUc^V9Fm5!=Fn?4k22n0dNlg!kVK*g0z;xgaSg&+uqx#|o5;rfp zL87wyXGKRqG2)p4DI-P^Tfhl|{b*x1_1O>E21R5`TC)hOK|-+rX9%bQueYfiyR zj#E{av8h9f^`qPxg2psgZaz?HEBBQbmNtQkAWktZ(He(z>%0YKc%kQ8U7!oJ8oZmxwE_+t=*$RxNd}!C?5G&q09eN?NbU{Ds&|d!` z{_}kf?CrfXg}&(dd;FWd+kay||Ja)@jDWZAPmDJkb?0l3X>G$ZA4FyuZ`K`yUd%!b zf7@?1v{j)FZ(3Jf8U|ry)Adgj3v~8=i(l zta<`ho-fj^4#GqrLy#*n=K~{F+H++TaT#EsG3DAUHXtz;M&7=EIe+jXr?^M)>w3`x zvcO>SZOBX_6pG|rgB2Q3nT9$S7W5?onZ*Xr^f)2EKS#~6*7Y<5Nx3dcQ>Ul_PSIh+ z4NLa({|0G&Y~PzI&~dJN8LFq;Q&bzn+C z{`h3}W7nl|V6WEBKYzAI0!;d%pY;*|9?#5fyIOUqlyO32n)_>7)a`$}@2@^sGktLgi5>m`C z=JZ5@DzzG!AgRnv%vU5YZI(mf0JZ%>?k&kMaJmG+*Uqqyf`Ty!sewXHY*8ES?=Yc*SUQ+}ol$P@ekEJ9WD{ zax8rd)W$4kccutxyyXCM9zr8Bgv8dw(9otf2&k?fYD{mm6y@N8pp1s_>OUzmcpQQh zF5%MlBu8EM$*WN;%o1*C3){?Ess=>#24NS=Lx0w!-}wH<^C$m~kRX#!4WXs~5|eKY zane9tK+X#*tW_wqb2x}64lg+toFc{B;I{s8T=ZpzeQ+OuR6sX22Top&jN^}AeSI`< zeIR?Y)-U0kD?Yx40#th-M3z9cy)9*gxyC_?U|uu|3-|;*Y9BbyH_j1x>k`pH#*n0j z4u9H^uji9x;ycT3=Qsk^T(e*kFg&a+RvUt!A|XW@8abUEA}fz zq$zz2my7>iZ4XlQiBJiMyV!?m;Yop2Sra^R#_6otT})~N=RV@cr8*v3Yju_C?Im!1 z4iUvgYY(H{@y@d>ATOt3#V20Uw=2H3KYwIHqfxOJrJp$ZjmhkLU0pp~cJ?aR4*p#K z*%dTFpPc(ASIf+tg~YIg2@sSM6BxDfwsx?qEvSwJHE0kAh9qw!#a@qU1MxaXfr zm7fb8BQ+vI>Q0ejkSRtXi%5_mHmXcW#nBcjaz(wul2JWRb0jkh!s-lxEas2}Mt{TR zNO(QO-{P{b!)kOyu`7o_lwf>=mq$SE>pVLC$rT@tC8Fd7Hl5^}uik1Ks4>^0!H z^`{V8`~Bv@EdTz0Kn=$qtdyR~n;(8^4SQ#u?VzkYQt%Q&o*1j^3gQ~$fVe~&w7ifd z5$p66rbmzhFLJ_A)ti56D#LEskT~y!yR1<>O~@w@%r+tU`$W=v^`-<47m*QE#MU6#+Qt|lxr9b4lb;$?&&pX~+nfPYH^Bqf+9 zZPG-*Icv|GwJ1I);nMP)_Iq$>&-?y^-C7?_6C~C{tB7@swVjUG6Ao%=iQ2Zpih$(I z3&p65zrJR{RkZOJ{&l`T_#J+7Pc{TS8$u;KBALT_=KUvB9Dg4H0>C9;H!h6Ufe!SBLiz`D{9>98v$VBn_*st*x18)( zdZ3@$YC`CbfGf{vS_SD*+c@MHG=+MwV>UO!YKM5dH$e^XK^#o1-N}Y!*t!hgIa(~= z_iI(o)*|FrR*>d$E=#kCH#Sc(4ps4(@-9E^yO9kA9|bhsI?5mW#D6geY8g6R{?z;K z+S)q*%@rM9Ynal$)~)k*eY)=>EY*l}3l^z?d1;_mwO+qg1<5lCf@2h=!wqOxhNyuA zCzxwDw^WBZI12)s-e^B9fm4j2iiwDI-~k0etg@K2Ea>38~Z=N)i6Hk$6tH|gvpq%Op$BoDiHzKi3wxCTJS)Az@Xllxs zpa2!Vj6GebXLhJ_!ZS|?Zks{c)_a5x;>1dx8H%#Qxn~oe3V$B(BL>P1oOwFe)`&Bo 
zG;|RNW9_`iBvj}|2eLZo*@*_45(Zge@d_iz7!HABK`g}8-m1lmq7*hKR1a$p0l|oK z>Lkk1eZ^dV$2}vFNg#>kwyasCo&mHl1I0y6Y2twD9w>=jNTv0=&Acz+%N_QTl1A~?P_shS6@3k*sTw3Htnz)Cq&oqx$W0%V7*PvrQ)M2hTEdARz_ zM!b4aDI@1CwnkoEF;E1Ohghlylhm3O#1Rq%AxNn$7)GKjDK4dPunjBM0uxkMhl(N; z-$@XAeitX8_TMU|!b#eaf^IDP)V2511^!BE9T(uE$-O){vKj>x#pE4@39FcOe4LP5Pof*VArCLRji*E{lJQ?Uo5A2AxAFfZk&my13KNz|+%__vUB2`0%opOaAizUw)bXn^z7$_#*z~ zWbw-%%?`UC*iY=Y$D#?+5)^1zSB8VMb7mCpkN9WSSh(A5a9B8`Nbp1L`&`Zf3sbKu}Z+b^0sbxfIjXA?0OR_cv z0!d8N#Fi;F+saD=b4IyfE{M3z9e<(PpGaENWt-6GbW@Y|G{9i<>_6dDwRq|PdIli} z=dQ$K_7W+QthLMHFTJ3x#UHKj_2Vm_@BdrpyL?7Z^bgNl7)|!-aCy*bN58!5WWTbm zhQl6&!?WbL@SLTT7(~N37blc^YVxl493+~f^k_vj)}@ylk{bxIE0esPiht-eNu|cT zLI~o7MFJDd8(2adhIMEi^Tu!U3^XXg#I_G|ZIqXzj>;b5_rMEya*av{xNQ)KsSbi$ z1uu2{>=gq#bvbtkFMaajXFa%obQ^2h*TAmf!hvx_THR(I@vFt@%Xw$N`ik+%KV6*s zL)$(42R8cQ3|&ozh_~_f7k{?-=-u8OQi@9x1TzUDMUbbdB}MI({LBg&^H<#XMQrT_ zac#VFlDyjcaoi9=R4d4KjMtu-itfAJ4w7HQ@pEdWUlR+zPea{q;s00v{zpYz}A zxwY401@wPikM$Ri`__x}Y#+Wkk*9|?2Kq>zoFH8QuXGEiTCJC7E~@2Rst?378$%kh zTv-!QTIP%V$Mp^iF@K4Js*Fp)9ZoP3wXK78W1(t6!9MysM|T*dsNmoLid&vD$C7tE zhkRl%rym{O=&r9IAAYRKrNKE0g<%`Pa>kl0L0~u#mx;j*4~G>6t^|fSh(A9I0HL=n zAiUzP1*C>W+s{Z0#-PIgz>9}Amj8|GyPBLBP29!zT=3T?Pk;VuPCr=lTQj=&|JLje zhb3Qp`O!4GbwPTQ2^?&-Kr)UF+QkXzk}XAhdmaTI$6am(hA=N(6S#$%b_eKzX9_I> zRY67rmfo<77m0w+~8bzRJ4=re>jRZpLO3hjIi58-JJuJX^%7gR&Jwh!fw?>G)=r zG2@S;7a)&eew}0Hc+qjb5Y;x-oB0aU*bvm2ONtOL&*p3QFd}}iUgT{dWx_;tg3Kt^ zAbb~;RbbNoe8wdbln~&)8Rq38C=2hMN0AX6FpxyNL0PV~+>q>m3KWGeT5jL+oJfX1 zRAx_V9)AX6hNp4q1zNsW;KUB{bP|BuTOa_~1?mjAKu((nLc(ud9(a-BQ|H-;wXxpy zmcBVa{BZHi{I&nLX969^?A9SPg7}XX^M}7j_J>csJ!$18uN((g2qZOzjkZTox@z$> zZB}j=z!LFHN5CEO($+{&v<2o;sP`a<=7c4|LVsY8;$#fTVcBRNEiSHWh?)qoLxSv0 zJykp&2T%Y7Nl?q*H)N+t5Z#`GPjD|C2wz@tE~cM2D=!*#EMr{ZD-U;f782qZcOz&<}R~3!t~eA>=)A^=f9j~F1r5KsekjWP~O`dk07-Wdvk2&BOOf|k(c-SjLsddZtbrV zq>8N-kd@^caeO$Gfzfi=PLm+E8U0sZqPtTqy(&}<86)8BKM@;kJRYF!3s(Jyz>p_B zI}wg8l(w%F;1^x;)^EBo6js8uYigMFkov9PcyAthZ~lj)ywlMK3v_<+=YQ{ibk5el zzSz6ddltq=L;uxeKgZI7zF2F+R!{r#8`WUH^}ee|d8hMy^u70kmdM45j%Amhur1Uz z$ZU?Alwh8&tYEyK10NE-y6ZA_3?b%Nco-z5MqZjo#FmBITt&C|Rl8gfbP+1T;T;s$ zF;vtL++K5r@qXlB$-6wzVt-NK^rKz!+GzE=mP?_#VzitQ%W8td?1FhU9|A@6T?a_S z4w_Gg@Ht0pA->Q#x!rIZ(}a0NvPV$2K8!)c4&GNrmq;b&-I@Ub-g3w$#@DZ&mI&pe z|F<(K_^-bAi~Q7s!^g(mdOT2Aq?f9%oO^!E~_U($wVx|AkuGzEBUp%SisDHHuYjw$jtnVz?p2k8;rS(0L% zN4>se1_){*P`MGKq_V8z*CnXVSpwv3TfA`=x%Q8)%YgR zq_uY_zkZ(6(0{{Ut@o1yI*!Y}IJ2|M&rD7`@AJ*ob3t2<_5N_JzmH|u(Eq!C|H<)e zL_V`V`v3RXX@-I_Nk4chwGd*jO*1H5A#jU(SVXo~ijfrzzQ=GK^Ez4M?36ZaiJ{f< z;)cbEby^`7ApXG?jJIJ%5bx?o)vcM6Uw^LliAO~I7*$~hUr zG8qD=Iu@MkLbPiawlUD^{{6?HH-W$N8$lqn?|It(3?=<%JLC3`xS4;kNwO> z@dCPabcFiPb^NQx^zr}JzCXO}TQ@s>hyToW{YP)Ah8Cl#pBlhpLf(O5EyR-WxU++& z2S)JH&Xu`HkqIP(KfZ}uTj}52`(umFj-b+36a3h*J+9N) z$E^-px44y%R@a+QN&e_DA8cT;uO{>D7loa6{8y}h{kff``N`im5MNFGcdlujSw)%= zNxOa-Mp@FA8cU@_VF@AqXeLmTj3@N_mKYf?*7EycNVjt9e()JNBS@LtjnG0>xUb)Ik@d1 zwSW1PzP_5+=ks*$Z~-h+ig;|`oHQ0x=jEt~peO~%$^l*#hH=Tvg3m$54k?)L9b$x< znA=ccSCG2Z>)Ie7L2`~v9{uAi6d|3d=x@{^sKwYvXv_#xaBcPd@JWK>E)({?+W;`WUw}m49J5 ze+2Q$SU~*8KIe3s1$FxM<^EhrEcdOR;+N)N;0ewvv;=PVMf%IR{)*!+r**jLn}5?< zKmNvXqba0J zjJ9t4@YVgY&5KOX_VJ`^b^wu5IDa3fAe3Lf3*xNv+qU-TO;FllupJ>S_aTu^2MiHIV<{B084tp&u*_;4lEe4d6uA ze|+xw? 
z&aU*<&nxz?dfxHks-1fo68sbp16EpIeC%}F&)myzU*X?;xWi52A3AOO`k!1N%xafv z3x_>o@{-PK#69!eq>I`yO&I9zo9s;?EM2IYotK+55-T$(17W3FsDHjDIuLI^tQBuD zJiDQS(TW*F&&I1n4(x(IY)nr$L?bqsmF@TF_|rRU^Oy!iZRg7I{H6TO@BeV9Km7iu zPW|+_KQ-*{di2VE;`q;dKiuC$?|<})&-U#HBUYs2Guo2ka@)>lU|rqj`Z0-3na&aL znn7&hcK)KpcEh%}$$tofRA<~;BgF%!k)r^WjTx{|mum1={v%j0pFr8xZe#uyrV&(j zJR%+l<7q~%z|AV-oLc#j`gyw4^ERj$RCBHW^#cWr6w#-iI-oyW{H8)U*CZDAAfD?Yxl%J+45VfyY=|x zTcVr${b~%^2%bQ(>(u5mw&UHrzgs-DBE;EAkRWuXlSJ5J_>@+E|gpT(qexZPqN5!kE{4UaP{{dhHt<4-!nEp_8R_|S*Z`c z{>))_I%%8t{*V6towNFDPXGOv-jxUgxBLWB{i#(6h<{vCuI}A|oZ~|lIZzoUs00bV zFaZnCcA_I)MR1>W|JqFs|0CA}I{?s|z4SBxsJoBAgRes$2Rh8Rn&~Bi=)T zq!6@NmOCKT54YzGK@jK`(%vp1dnD3wE=~mLG1epS5YRSXCw5W#a0bQ+FgIW(0(S)b zf5nf{vwuq9gj0%fR$928>p|wXqa|RR`%~%kp$1sC^+`$7wm#X>cSfnz2>v}Ebzq-0 z`r!lxJ%2T}&&Dha$#QQVz*2dHV7TGW^BrXKK`=A|@ zH*fT$k{HMgq*!__C(2obq|mqTpw(EeP_8U}kbiMjiM=5P2cMV7Y&1jXnFXA05Y(-< zrhn!L+994acawDLBZ{abEPkg)=R~B1OUgL)k=Kb{CJKoubotalG=!6oaB*7~@vFq# zav{S;Ubgp0nHeG%6A1>q)seNxAabM2ZocB#@{lz+2Qa$%it-1)eYNW@N3?l^w7FTT ziV&gW?-t;{TXwiO$+QNc)2wDyK z!`FxHJjP76x=YaFeDawSHNL&Q9{k_<)n9W1+3z-dOuj}A6iU1ukr04p!ecs zfuwKtw>JKlj?!{`b|3?vGrU2iUA$By<;2I0S|IT^jv6q%X>nAVbCH}O9~NX`WPgU# zym!{-0p|SlQNDOc2jMQq4Q2WuLk5*0q|nY>VkAXubUgU0k4S33b9_TXq_uV%bard` zQ9F;ZeiQy@zV@qgTP+TPF280U%}_9%{x!$)(LX--I(>OTVj!Bdvy67py{A3y)g34N zz5yk{puP;w3GByx#2GUiMaP062Y-vomWmCs;_iv9mM_)DJVS=4FJ74sBVfcurGZ+0 z`a-_hTrgbK@S~+>1p32m%Y)l`g*idKAS&?8CWzA3UL;^4Zf6Ic;|!f0ZP)sw$ZQgD zyYnTkC;gsAdg6yic_;mIe}?Xb?4KB``O6nRK4f*dtgTD0*4N_0|J3nXEPwVnKC=JP zrGI8bjVAqF1Jtc$YN0Q8gF~#q&+o#>y%#GHd~sO=j}+xQBhLupDRIKe#vp9x9qiis zH0g_fcor}V2lj@(>^*6^xo6w{9viM>bKbP)i>V!~SRj-HWK9q+`J7k!Z4*;n($wVC zMtG_H{><(7KQSD|1V~H-*MDKLJ*|dYd*P6qBY*wa$L&3oA3tu_4XuW&^J-KhQ>HUrUA@CV1Y;hflPrz?k=UQ(I$ftF#Cp~8VXC$3h`N0G)3^TA zr&en{*;2)ww$h|ac3}V3cFaF(-r`~UbN|@+uX}?2?F%M)Rk+P59e+0La&vt6iG{h~ zUtjo38}IO$OpJhgR@riV)1cqxknh->qhA}MVI;%pkLh~!m^ zRoa=ksnyn4Yt_~V-FZnJ%hE(S)T{h*G8(RT_p|W2)o8FU$dM9?P*E>GF~kw#!X$%Q zB#5kAP2D(Dd(1g@4u7R;prGgCy2C{{fN+W9G7Y}`jGJZvY|Zlg!;{;&?tq#sY!B@W{cnu-P=EBdCX|_u{Bo-AOg1gr zwfU*N?Dd_k{c0rLnigIEzS97^`NuE6up`41=T#$$)}~%W0$cY?7gz24A^Kpqin_yW zy~?u~b3v_w)i}>g#I48%?jT86r+ef^V3E={WUHN4p`WJ&3Ew}UY7I&C)l*{f$6kqb zxVSFJ*hXCHx__WnH_MPiOhn%A0O-7GHS%_k?Jw82) z{u1bx=^E~f9 zjV994!nkevP~BZ!Ro7LL?TFZ@2{4CB`@sTX2AbSNWwj-;T$w_+bKsQjH!oOkDmZx# z<=SW!P0VVIb^{8vZ;8Yb%6%o+-y*SXIeWPXK|!R`)Rc4FC#i}bUL>xQ9sVb4Tv3%cW#@0t%{x7@BN@`^HRuUrC zU`XFIXmxT{e(4k`qY`Z-n!`DR5RDRqDSf?ur?AFDrZkF~wgkI0@by#Ins8W+E zVK878anVOx8e2S*Du2e5#3M|S0Jk8YFe+1)NyZ2SBT%VBq#lC+0+u&W${7s?DTe4+ zwSOuNJinOuyS?Ap?(LSV!Vn300X-o5(-WxrWTBl(ss-)r(uheS#b6-m4r~()24NpR zKBUmakDGTfWlKNr&i~`!z;C!S%M8EcQ!n>1e^+Oo&-yENKN|&-@n~4LF&B(o zL_s>c7&5Q2BS;ol1moeDYYAafsYNzTZGWb*HyY<>BN7&CVbAh1GzIfrL!OANDr#>1 zxfu)%;MDHSvTSPQshvN9PsGGB&2oY34I`?wqtb&ySq>sNlt%1^KZJ9?U@|!dDyu5l z_0K!@4@aH{T7po(&#^jL;;;RKa+kjTUQ4f7m(JgRhc!@X{DWV*+L`hEco#1mNPp%> zLun8rRRkC}MCDWH*GiDng^_c^&L>}P`M4sj`4}9vu^kD^zWT^aDbc6Dv<*LCp-8B? 
za#=3Ykq_j@?u$M|IfJ+6$(*k|yLnhno0=>S^@@<1QCU@No}R2w3Pb2`Ry!B5Lit+` z9)K?)^TEfsl&0$AQaYU7oh@G8*?*jV*(a}9@^0SV*(X1t_Ff%gKw=pL{VI+9%7&F8 z65Cpl$|zIGseZ7?sSX2sTwJFh#-*=0D5`i;Ncb%eT_8n(W;%_|`1s zmZ(YM=M5KkaxaxTLVGnj>Cedvgxo0qhvtW$S;i!QT(ES(?&KjsoAZneMk5mPWD%yQOADKdtt)EkLHY&@ z|IH9=&km1f((;Hz$(Xc7(ZyR^8!OW;>m(jQ1%C9d9;LJCI**oRo!q$hKyY zK26EXb9~W#BTP&RjbR(4m>!P7^1zkR_%BwIcx7}X)u>y z)k7%z_4QiEJZa5qtR8gd>Esu7=7vN5o2>uDn{@WpU%0#5x&2#SI)C{ZSm9(vlwg*q zY+EviLAr)m>O_Q~cC%}hC|ml~61p%+#-*%L@(qSCO7O)E{fmNi8wQZCTgt;RI> zzhGS8Zq6({MIANie3t#3BXt7{abPPb!K9IhxvI^niNvxppi*%V3g`cP zrpbUwKj*?Xe19!8VkulNOgPIq@@Cj3WGUK-n3?FF#%41xr4?o`Tu3$$WoS0FOU30C z=IP(o8p_d?%&BvbPqdNx&X6-BKmcnBfHo)_|cYyvrQH+LV*Jmd1)^5_w zl^L}|DZNL)xC%}|ci^oa%$j6Mg>NpA(?beww7FoyZ(PM4j1%A6ikL!oK+SiCN+@>3{f z+l{~C7uZ>O7Ipego;*`wO|9>F$X{?Ct`+JrS?8xVdko5m^)~wgZOeCYQs3MhcNoat z{@Q+X?hJp{q3j2%(dl^UpXa4Y4?mneJwC>{>V~^8+5TZ(Qg}?KO!~?fJTz&d*=_O` z(H`<08o751Af#m#JE()9guiKgN(HkiNpVdxDG^qum%XhC7YokWUSA}cO^py<{g>aZ z2_JupOAy>3d1fk~2JOV+kQGMGNTSIV#TWtP*O(1-n0NELi&qOshgTn?@2ij9PFnx( z^Y&X_|M{5tRx8mM&jfV7Ct*;Tb@B+UX(P^iswRQeB%u(3P|#chBr%|i9W;(okKmD$ zkdzM7fYC07!tdwYvW%h8mL`$m`nWRsc&>lH^c(6`cz$E3C(Tx?tU=2HQnM6^eGvpY7R-YJTWjaz1jY2fj(wP#d&KT!J^K!vtjqwjQJ`0UGpVEI9 zFfK}%C2uUj$qc^_lgcD|&ZQrjLd?VW*5&XXI_+jQNavf zWN7oCE+}e?YR~d)J2fiLGIHIQ$_1fvi)Il9ZBh`<)XXl9npBmMt;~y-Q}8qcG|Iy& zA<}gU$!3g{i-kH)?1Yz)r&}u*7QBCdY(P%XPGsTvp;k5nv$F#{Gu825d~;Lq_HTCf zr7ii-nIil?AIxv^{d0Esv77e#QPSzdhT=c5h=&B3_~ZElN=aPUnBER%VWz30XoKiMF+H9?LHQlL^aLvnixIJXer5S($&K{_03Z zc96!`^8hzm?afmqfeS;MngxaQ;Y}=fe9ZrOU(1zwterg>QV00L?)!TEs`#uwKj692 zS>d_Tl<WZCVDsEeRRWMRi zmU5osq<*)<$_kWN(r_|nL|cCo$RoR>Aj+aXN)c_*M?jyF|Wtw%S{UoIxa3 zo~>k8Z(hmxdN??jVhqvLT)3J38Q_i@YJ`4RaZZn|$xJHK$8{#kbV`Ty6w z`0d8u_tR#?3}+;4>vxF6#)dvl$u6svP8xTo!v$s%~oC6{g zXe;zXd?atf{g*(o2^oL&yoZ2t@8{fbha0}|<(>Zfhj{Y7uW#{#82ih5j2^Cy^E<9O zqxIJN879)S_1AnuM-OdSpn};Q{ zREMYdUVl7cv#Xu@@-Nq)?re3#joopk8sGd5({G;DpS5LXgjIUCR)}fgyYx_^W8!N@ zhs==j(;;&26H!=1%uVIzj!0~mB+1>Hz3=cJxL3w!l*$cz%rCbF@Jm>QAK z@<)GWl4@;2N?(895%b^q!RCX3B##o?+r>8{t{v?P*Rc~F=Un&i#xHucOW)Set4`L9 zw|U1|aOFY#(PvND&Yi0jVw5cDtO(^N`#{cdkwk6*3oF!?woxY(D%+&~*o<}0cP5gz zdg~QpWlQmB#L6*c5>mxtkhSI%^;g)+*p8sK8b7l|_0E5w@~MUrp2Ui zV!pGAve$?910B{Cm5SIM?xpu@FR$a)VUDi)_%~MK7pGhbBO>Nbq(p3!YGLs5W( z3s>dRq7|2yMHmP@u$%1tI)7YKPUf+wh(jja$o*HsDJx&$P!qh{RKHkdi%>g z&(-am`}QAb`#+GYzjU0`h_*?eCmH_7pD6I+wnG0Ce1X9nuePT$Ga5Dq&%UN zuc^vR()f0FZp$%>Lq1W(r;e}L1$VZb+g^2B$F18s z_Q&(|)xLaZ*SWrDw>CjrGga@b&Yxd;R-PtP;xkG(?|3ALBd79lOxq3m4aDv3= zhgCqSvYbo#=^U~Q$yZ4Txe?7%*}#IuS!{{k?gLV9Bc(bU{4|EwAXKT>lzl&6Qo2Ar z!0hwBCV9aj_yl4BS&g7bg8^2m>E z&}CZr{<9wH1y!~3DCbY z;qS7V2n!;yr=ZXIQB_`Ey?F33uBj;?(7Qp@%8aHwrq^&y{jzt@n*ke{d>>8t^sx* zCy?DR!#7*+IHR+N%e5i}Uk z!ZJga0ae-7LXx9g!`P?oG&+(XtcHl~CnYNakuZaRNtGnDNd&CZ9jJd4W1(CNCLC1C zDM_R_%CmW429-Rt1p5C=ZpOI}GBwQ{<8gi05fX8}Ge0ihgF;*Sh-K@r@2Q3MPZsfI z*UzGcp>ioYS)9&~U3d03=Nr#(-Lso-uJT*l><^c5zZMV&@!V9c8R+OgT=7Uu?MMYg zWFGS3`z_m3nCy9@&#XRH3xr?;uCr9&hN|^*TbbPIh$tl5kWMMoMRid(FW|(2cJy1Tp zYwsY^oh>xSp;bj2kVIgtHLjD47@_7-&^ZfJTzB3E7oEHV#6VMAcM7A6N9ydHzje-U zeqMh*4my5M{_P*{=Z{$u;oDP%X+XB`uFzv>hF_M@EE>s`AyB6jUmnJdOFseO&wS#? 
zPQ!d6)Rq9-8cGYHVGLuXd(0j3zMu{*6ylI_pffy|AJP=#40@bIZ-4m%o<19~=C|(1 zBZe5*UmSzIO(whYnB3XkZtH~dG1lrj&#!-+N9xA$b3gb0!mseHQ~03|k z9@y5R(v+rTk8#MHZ2=F6kjB^~#PdTaBIH&;Ou7jy#@H=JFqV#+RK|Ny(jZE3BLT~a zxKGp0H#B7jE$Z`f93GyP_*5f8b?V-uTQgPsW`uz%mK);AL9>u3x&4;mvvCSI4H96UKx#XM!_ASb*~ndBi}W zS->$sG*+;Bkxn?|F=G~0lNmhuuVQ#4f(pq*>J-2tM;%{Rud6qwR4ISsj~0KC8>m9Q zyS&hod3NnQnOb>5naCVc@FR3FVh+h7aJHi+T1iZ1jaU%nP?4~8(Dd}AHAW4}+sZNKj99Q~TTkNm(8nqyobg*ZbV z28W2%2owSU!?fd6$e9^g!b4o(ES@mLsyiW@vMzVZ<5y(Bi$f-{RY1suLi@N$!53Uq z+#|^M$(R$?vsoxYLyFQDDzOUx!D`C4puo76m&6ULghk>3#F}-!g)V=VG%?JW@8YDw zxZ;xs$}yD|F3eI8kN7jfjYPFcYhhDyU(E^OKWXDxfQLp}3}KeZW) zStxfVA*L~Tdw6LdW7nuRw0+@zu5~!G@VDO1<7W5&VM4Fp6KaxI!c2Y)s}cIKc$n;r zG9X@=1VpOUZj`nlQsx!s*(?W_h0F_6LINcsCxjIFc234*;x^JZ@=w#ry%+sbS3xLe zp71|j$%Ip*8idK*31o$w*P+>HTb@s5EU(snn^M}c6^-hznbdznrV>s|QIK3kMmQm> z13y_rKM=txb07O|J-|I}6v|WAy#eds3G2XsZhz$yiNULd~h_P~NM)<2 zXfDWHFvmskfj%Rw7~yBG6q#3SZBw328*PbLNqAe&o*N!oq~3e9j+x+}1nrkVy$Kb6 zRS{a`uH77~gVd-Y1lg>4Xyn5rWF@zyB=8;-gEMmBO54b{QJlb$-TY?43Q#Yfp}yF{ zonC%jFSoEbBzAt`0Z?=y|puRN{GX&ug^VdvszkaFUrO3l?iK zFw!BDlP?bv2H_+%ghlMq4_OySt+mckV^sN8uS-_^1$m)*t{g|&Rb)2J$I?8$cX3LNESp?ZX6+B(C$7RzI&_*%VUT+*!-z~ zITEAc8;e!QiT`LH*@Ka@mjN| z9&I@x4Ir#$eeuGc4mQdMXP1pYRF**?a2t&f z%qIfUr6JnF*^C=-_Kj{?MXoYVg=YIg#H-81Hlq5;D|pIDW_YADR(R%GW>-77j#v=0 z!@)si+p(1PA+13rsOLu}g(AqmEE>;k3aoO%$;BoBRY0o0X=aNTlPF6Ve^UMbBW^v^ z$g^@Xs4}p-cpCw|-SwN_;l^EnSc%?ecjbq{ae$;^Ye08X;u67KBK4_D)f>jdkhd;I zgV-HD-?f8FWN2YiRqthS}3e`djUPRtM=wBrZN?S*mT9H0RDEIkVd0~hz-$lp-K`Z&7&?7sM0 zZZ?YRNcw{fzjEe7UcHHyaUOwTB;_Rved{ck0V*{qo>{3}qYuQAvNk}`x2Pl}*SCf! zOSh07qkxzhjJHAEBd|ry4Jku(g@LGPfTDRyf7#RE{{wp(e;g9b;j?xz_aQK}DZNus zPpL}2ELBshN#7{?aV1;AzN8Y<`4Eov%%)H&t|G&>2jGH%ev)~J!`+TL) zCS7ver^R!!;CWgwmA?_lY)hd0ng)Dy{D&!1d4_wtArPw;j7c-unT?F8sSq<3P?w4K#QE6U-_V70xyBI99W7z9--msHmW~X=wH2ZG5MHrxafG|zWM%$sI0L_oDK)qh<7W3nGG0Q zD9B)|?#gELhik6GOW+(cQouZ|-TIB=wl<{S=V|?3f4{Q@>&GwWv>Q9X^S*<*5mdJH zrE?-8nUV%E@+tj*iVmvyAs4MI zw->c2i9a8zOUGMGi^0Rh*dwn4Nip;69`&X0z^cxEdb!z8)8$|C8Qicb=I{9NzvV<6 z2eP*rfBF~;cSuI}F$O^%cvrEce?qnkP0ye@U}x-yb|xZdX*U{haTYo_?gck9;?T zDDGp7HW8IksRVPlg+%`0`_7+=0^&8uuN`)LPf54#@aPuj^b>>9Ve;Z$&O7ex25}HQ zKb;}{-~58TJ*b5u(>gu$J7z8=Q$mDQCR3~9@WFhM+#z6A$V+0A+J!vxXyNauKwBp2 ze<_l)p(^<+PfUTh#PS2enq;xm>4C2Z#vUThQl`X2ns#`0dAdF%-`(DMc@j$RC1uYTvrUe*B3C z<0tM~-u9O(UZn89<6OIb?Cl3h9|v;te^>QqK=X$auAiG%9Ko&sk!J<56+gK>6XXu0 zx-6No9Xm++3<~G1s0ox8ac#G$g|uEw*LLzMkWe~ZfO zF!hQ@q)39!Mhvz2f&6tH$9cGjLgYo*WT13>K48ch4w~_fJf5OLeqmyCu@MO;ZekF{ zN+zdI$PwJFkJJk1N#JKF4@@iZ@gVsWnf!5tsn z9k0(Fui|a5@jH%=f6bRV)eC`Ke~29qr9r_v{qQC;(%-vrm{sd_u`2KR<{&LQZqq-P z!!@WccjExQ<172-N+w{xjDO#|=O$%W?q&x2aR%Zaah$?89(#lQ2dwimY1jCaf98+5 zuJIVdoLzqGz}a1lz^{3L`HsiEPSKS!mQo`>b_SpNhbrG3=WZmq@wO_Be{5N1QBi-` zGBZh@_=b>luFxm)ydIoOMiqA<@r}iid4T>0@@+s41*OY}#um6fNZK5~~@#|bHa58G?JD$8@- zD~7|~pNz6Ld5A$H+NFMSpk^I!qEeZcFtoiyxnhNxJ9|HNAShTsx>y0>88V&DF7K}d z$u_w5XKuq7UeG>!!YOek4~9sl_ItiX zjy}Vv9)CEIlkvVFjTOpkjz}FbAU5)ykc&7hyBP6z(_`U*C^fx}3+CY2j?;|`^>qz+ zIYOxKKuk1WYKTicKH}O2jM#EAH4p655Se8dj}_t}Xsl``^tF9|X#y&k6;S<896-1> zXa=cmqA;dd--nP@q>h5;Sj`Jb#FG5;Gn=$#;bbhmC`7{;G zG-7{jlPobvA742_Ebya_<38;0COzL$ey@)Q9j-q6;m+&w7=Fin;n;nTOAx335k6KN zx64m#Saz>V-t+)}`L7@Sdxnrn zaAq0AqUrNs-eS!=o(^eEErh|r8B z)y|~LWg<+Xcogy`@U>)OH1C+75mIS9!dpr_K&o+nv~&4=eqtlL=A_7aK9pA;5MgVW z>h9sIGN@wSU$`g`&I}JCdWA^+;c*Z%xIpOZvcUY8dg z{m>2AH+$pt%=QaoaX0@)5UHI*)t*w|eq4V{LD@v4Q~JtwrnEJl>qUV6q1-XL{OC|w z=o9>Z$&YSqV9v=q@flxJne$P<0Wlp#hJuT*2qZ681MPudMVr%zC@Z@%>3 znD*ZH-skvB$s*TFk4!iR*$SHK24p6YIyH`eYHLb1wF^<(N=j-t;2b38ytWL0;2Mq-D?@J)r*BlIF-4qMN!B#tJAUYR%%YH;S3!}vB=>P$aD`c6{xiUxd!ViI#?UHbDQ0s*d^Qu|j@XJ-Dy~SJ 
znM1L9CFjJBnZugnm$g{$ZSxnI|3s+<{+- zNmvFEZCpGF#9v_urkBkc!fDB~y$96Kw|ZLB$e8N7;U z^Wlu-n_kEVcUHge+P9S}s}Din3%9C1&h6>bRr)C|K7Dth*M8KTy;*%uTYclb>tD2Z zU_N8&@nSyutF$>A_Q98vc6D$cyWSPo@t8fk199FPm&*`fVm!g0{tWalS!7nx2XP%V z`F-yR4BmfCwEh}LR8j+8HTZ&msRCOwYM?cYQ#c5-grFuPK?~THV1qLQe`0JUU14&* zIoi3+imZTKpqzlMoEcb(#)?evI{Cf^9j~|2nE@9xR@A}k4PM)N|9tnpU<9^CV`ZfH z`|l7Gz@*`${qs`ebq$72U-0^zA&t}kT;D*mGY0tt;2gjxjXmZMJ5?ZmKs3Ohzdy{N z0zU@EjJcu`hBXM`x?q-u4fG4f3`R02AV^sO#vDe-6OGqE8$ACl1T4TT2Lo&mi?IGF z*5BgP1o95KfR*^Zeub4Wcvd_CBf5P)-O{-FRaCrqCo9$C-eY83RQe z?9WQNtHB^Kriyg+k1$x9rEDwc5c@Zk7Qy;m+ed7y&kV245ng*Z{t=KB7?1FrP{3T+I`|Uz%M~8A zUp07Ebn-3#UvC2<$(lINgU9`gPG;40Ui{^cW4=TC04D_zD`yKuZO21fmS_uj->nJcV61!)Uk4lWWF$dE8YW+pmY%8nzFN4AOYp@wSp9b}R;GrHoEeEf&2YaB~gWcfe zV{q{X>`VU|tnHU-HcAGo*<}26*$jfW$#8I441#!~4K9&xy~))l9*h|Lul%b4YVHBd z=efCvKt|6JjHBt*u!3hK`1g@R(9e+o?GNDgTm6jsaf8px^!VsI^L zyZjgVO0GQLFi8TUoGPaI2-u>v;EB-O#u0&0gJaBX zDQi#%{47RhSwkpx9-f6ABW14QizMS4phBCrMxGFnD~V;f=C(|+80)~F0TMGmE;f7t z0-xjO8bn=#f2zscSWrLbt)3{qw`@}!B_ip?f?5ck-*6A~#FTk$<7JSa{ zo--gZW!KNeE0dVecwqs)Pj>HvcWJd$1xI`bj8bxzi}`V7Jp*+%;ENHiIZj#w@w#WJ zr3~hOZ=z?|bIiRIDA3^>G8N)S>~z+c3RBKLk&bMtodpYk+LQ$gKN&p z+koj&Vsa0dvoK5Ufh~q4tsy&z6L7Ba4Uh00rt~&odDlLLww5c8*=fmT4HC;D-}$B% zED+lK(Ea}mG-dNc4ao1=>GM>9=L*rao3wV=U@&r4Y|5-&U; zZd2zqBdhPRi$;pHNXqD+>Mqx4AVo1!l(veZC7`7WXh72q(DZe!U@=v&po+BBxrVal zeLJUOG}Q@^0_R0y7HGjlDgwQ@ZIpmmsMX8sc(4$l_%XzKAJaA7ku%<2_S&AirZAokcfT(EO|yJxi~-jA>M6VGG7%3!|mSj0*k zSCiI+ofaDalrtxB8$qr`b9AT4M&rZppSu|PQtWG&E=9^lv- zHD-H5e$=FwUo%or3!vdVOl!gqq9A4sI?kzpcXNo#)XP%bOqqZfX-q}c^=ra4SHZ$+ zOpvFV*C(s(#$#wyrw8G)R*1~NS4<*pO@#FLJb~&%-m-NP|lNu z2NBn}3^y>v{xEP(y%NeTihVP*h$I+KB1+dyJ(f`VsfxDV@FTv9)irv{+L7W#+du@^ zzJup6$|<%_)V1)t`%ct0gm(8|J&&3FeeB<~yYJGrA#Hd6-SZgl?_>X#-F>f6a^?X^ z=}394lBox2N(t^wBxTcoZtumpS5y51_fj8EmhGqVaQ!kEu3vUHFaL?0@e$K`i+j10 zVGFW`MN@~T7-4c zm2DN&Fgg84%Li1+VAHod=w$s`KAHc6c1#sSpZ}zt{e2*7t=DpY2%8nYUdMu%QI&fe z70Sj=fjPxSUwNc_43TBZ=uOM~} zfOD5R$*WtG*&%F$oSS)pYg!HLAWmEd!R98#ajohaU{RB9zh&K?nDo!oLV-AeC))F| zDD(rTFewiBd-dOcpNZfzYy+E*P`5}A;>3JluA-WM;vPXA1NYT%;rWb1BCThiIWk!< z5~tfYX^DGtU~?@yq)}j{9$4M}{U;g2y}7;VF`y&b`Sp|8hOkrLN-cB&!4^&0gG z`X`wobd~pd4k`IF82CTxPFCyZi(mE0aV?s79)Nj*C9W+v??{wN?>3$ng>v(9U#5oY zAPTgor-$$v>lWlwI#RQuMOjs58_*j3Bkn8xSXm0pF!c(UpstDE|J=jAO^&!{-O7?; zp-41$`hBT?!~{8zj{L(-w*K)v_dj_Ha>gqi_kC{zgWvUjH#mK~jy*wLMXpYN>O=oW z1{u`2#)qApua&67bTV}CD~pT*zH@qhNd1E-Hx*Ec6vieVmE zL$X>>m@J~F#s`+*`YPI1O^f=kWra0#{#G`mt(vxfpDwnin=cji&A)2Try`~QSsS>< z6Z6`K{rtO)`^ZuT4d_iyYB(Ei>&qF1Pkt$WejoQzDURZF_m2OeIgIPhKb$Mw{NVh< zO(UuZ3*?GU6-k18)#;F-k%pgmEj#`3kMjrH!jIuA?k_Om7rEgP$gl|P?%lZW%wK=n z*R~CR7}d!;F$JN({^F>UOxrr|WYGf)b;7CF-^aRpCs6oo6ybbHWcb|at*B=cGZe9R zf{|N{R8;(n`}o_KKjIn2cpc+dN7cDzTDNY7k&o-Ng?jYu_q`37dLFo_Kg6ilut{A@ zfUu~~wF&r{^q*N>OLqO9Kg49%*WqOJIe?@qxAQ#2fnLkCZ3{q5{G3w! 
zZ1$g{wgqwhzQE7d@B8u`Kd<`o{pY@X|G6*Ue{QiJ03ps}OJchSu&z(vyLtd&SZ-Z^ zV7u70T-~=n>B`l8`^~Q0rf)y#%jtdl`*L3>Lo-_ctQ$6<{x)%+3E;C?gmR}&HDg%5 z?dK>y^Qi=mlkKBE{9S&FI&u76et0cUe#)o#{uj6XSv$6!Y-^{Qb?boM)hYT^0_Dao z`|{6(V%Z()l<{}jXU}8gr>w%de3#vScXikA=gdR*?&4F4uB~`hg9rDS5G*fgU%qDi z;xF3Y<2ws-x=6MXmJ)-O5M+n8MYh1z}jYoBn5@BS`ZV7=q- zvTj$8@3M7W%ep^zdSF@4=ravWw9Vk2wMh7>hWuEAUnX|(qdP_#pslJ!n}=Y3m$k057b>d;#AwcA(aDdKN^5DuI9b@O@im1 z%Fd1%daQ&r4{`r2%;FK_RaPu&T(EdDFLVl&FT876v)t5CMheqRKP3W*7!y1{%&*S@ z3s-2vS!|LmC5(a8KR&@8F90sWy_Jb0K*9{T(4Xz(zhXK6DuX$G=;{`yx!CK3IFIgqeuXE(lG z6ExyhKldrd{hoiW4PX^qe#$eT6)}JAo4`0)h;P@{U+xF+Dv12Ko`NBNj7OvcwI+}O zc?6roT97w%E^l$fsDxC(JKBAH2@0Q_3OJ+R$49%jfg?>ASP5i`yn~g&MuG&>hqSv+ zU?a%^|M&2Htn9C;4)nk2ce}jbcfTv~cRehh3i65NguvfnumW-fOM{)j`wZrIZA!F< z2Y9{4@{Geqks2(4?{UC?7WkeTtP<;OVZAnzXc(WjQKZFotprH?eGltH0jAhaR^xLC zwr_#)*-XRhcd&HukZu*<1Ej=uZ-L|(hqcCf{`Wp-rmzyE1>48#FD%coKd27AfM=}N z7Kn!NBdH)mY}ZDS3ftAShXS}jM%bU81igC>`&Vz9dFBVMP99D%C7zq@Pr?B5R)urt^yGRFFBI=_Oz z-~Yhz+M#O!PfJksUT%1uYx@Osb1@@9~t&z z{`d0wx17u*%B@0w`5t4xZxNhE0TS8wPd|R$IF?<%$$lI=xo0}Q_l)oD<@+DVZ#RFy z#rViI`U`Cz%m2tN{+`13ruh3~d@jS^r-092@c#`yXLa@Yywe%^pACwe(-uIoX6g7f(w*AFDNW83wAiT88N z8?}MHU0Zx_CpUe$u3n5ma(fPtf4S=A#1@d@HJyN`$OV6=lOwxp?*;4A*HeLq<70DJ zDfq&6BfPR-r|>=9I`D<>$rL$U*D=+-ALYBB5B0r#c#6z$U5RggJpdT!KFa4m{^Q*~ zL2$M0(e?>{Zc!Zf6Do0O?c$53D6*`ZiX522LP;k&-G0`IN_}l&w{8^ZFLVHo$amu? zbmf)cN;skXdIIO? z!^*^hOtJqGAR*?_OcYiwLAh9BB^QFMJHI86Em#>t!yh$4q%cxO27v}6VeCO_zrOP#;u5a@X)+j0z)=77zy6p1ZC{;U z*XRHIpU$Vx-MM|={ipx(|NNi(>h1i0?AO`u|Gs{DEspKKx$Ahh{J;El@%HcC*Y&wQ z?*6Sfua8C6f1`GV%+LR(Yudlpr|jQ8Psjg%yj`Ey|AsdQs@m^I|Grt*yC?mB-)b+G zFR!6G%*l{4qjN~3Gn;Ypul%Xx6+o7PFXU@x5XNa`ixs!z7T5?1VdS7ZDJ2jJW?^$% zHkmJDzG+^MCK-9@6bz}4#*xda#d?$^&MMLt1AFkCQo|SFrJ8P@2@};zve2rB;Opam zX(?Xl(@Qw1pGMw9VzQIdgRd`-$#$zf*Q(i$YLA+^PVM1hxV=1oS{w4Xd#|l}F$`ZW zZ!am;9*TqI_*2a}ygi8Nu?k& zf8(xa&3Nk^pIZHGbo!W%r`wlFZMaV_^;E3So9WReuOD>-&+W-ypJylA<>u&k%(p%b zKXt-Wb+Ff4w@4nAex5A0)r_n@xMv=xYO}51wdNR|o(H4z`M8-q&FG-rf4;eYU&qa5 zE1kwT&S&q*N1LgK&#$*dIecCX9-;9_t^s{y{${cI)ZP7bDwo#h+x{X4$>Du?{(Myn z$eLm}nHhyIhM!dJ2gz~s2#H|t6DJ%9d(G{@i}+63@AI#{`IRhI$!hzuiw|!vgZ*wY zfaF!*483PbMtQl}&mM>GC;ei7j^omoWpX@`$K|p;4JP}|B3(`%$%1{l`J1zZ!DtYV zzt%6B*glGL73{(#9Gc~ruMd%P97X{>zK!!S*=a9_#b~m4ckJ`Y+DydSH`lxPrB1e< zl*#$2m?j|T`I#pNZ~Q_}6+gl%eQ)2w;XZ%;NDkpk@#cAX@CEzvv>5Dv275c5v&hNB zn)t8pubcA8TfN4c%5>W-gwL<9^T9f)#VD}#{afJihF1|hF27dxaQa}4-!G%j1T3*~ zIDI2uZxf#rnO$CWMY(71L&Zn0Pm|3X1gjC@&*SQ#@yY3BrhRRLNA~uXeArd9oG#pK zZ+>haKPMiU4xc;~k;#jHJ|pC~TJ9AOgEuB(#Fam;C)sjNQ=hktht4Jr+r??&Ps%2K zqf>KSEaO2D%^m`OXf*p5bGd(cn(fB+=%pTPwOVeU-~H_CWSYmJ^H%!W?CDg#7P-Nn zp8SnrM1u!1AB>jqpdP<#yXO=0(xF(Nmy_qQ@i-jEi~U0&?aSeR>kR4soIM8ldU82h zU)A90dGPY~^8ENPetz4GC#`vkRtq)EqT%KAFcR;C!Zvw$Zr{MA(|EdndwmF|Bs7jj z3=a?6u{{|-u9$kKhUN~>*5I)J8XtVO+!E?OJnHo0aVMTlb=-dqVt@QOdpT!HR38Jz zAN5ca`L@mN=J-y3y)>R^uUf9iOBM-OkDg>{cxDl`4rIDpkK1FGD;3K6OYQ3Y@!hrd z{%mhmzHsWs-&BYF!OV5vB27?hyOFzjm~WDcZhu(9d3rn^p{)31E*kQ3YTK{O)4WSb zL;EtRq+ZU>nbg+V^uwAD=MU$%^}IgpYqlQj4zD5IjvpU?FNfXxC@da#Yqm3eHFh2r z1NY>(jlLSu^J`L0Lunj}bz3vOj@6>%(t2x35?i7B8Obr5i|0iS)94_};S`))43hts zqPs|v5Qw5Ex*!I)4RNP%o8j)x>N|QF`7`c2=OFU^K^ztaL<{pYyh}YqARcq0_S_t@ zv~kxSDyBkzZ4wH*K8zjZRO4a_Apb)1SPoppU^bA5&|@D>Fr_FmIz{&d%*!S&L6YJ4 zS*m1!Z$28`CAq$*A2pq+Cozkpuazf88G?f2=4c|QrHmCyS%O}`U}pWpc{ zD&c>BQi*#HKxQ;?VoP>@drW4g(wk87(Hi~`9ND)+x4-O-M7Wd~JjchP!)cE!_$U;y zi>xEl>lqswiu$K8_9i9w7ew06Wz=+qjdxvCT&a_K#MJ@g1a~;=vYYfDYbTL>mIr@9 znrzCeW1jpea;FEeejXloH^~g5WCG|J`szG?2&I<>;pcKQwPJ%SiY@hn#Pxl{^~1pR zy~p)K#P$8{DEcES3Nw(<%d{pJmBCd~GpAz{Z^(Kwl}Ud9J55br(YePlRU`E(^|(t2 
zRM{Vd3xn;dUePLF(cY|cBJpmjh|ZW2L`uzx#yK-1(|>|kd+Q|x2@uO#Gh=?}mQTWe zt?y?j1;%h+KMWJl)l)5qgXS%Hkl0uAW=s<^B}r34|eKMMzCPl|Ejx zb8?#(Sc0mZ3ZXv_W_`JHz8WHxmNq@P;5(Q37JN*GtNCoib<5bzMI*X}N3+F9&@~3e6z-IqAb_IWR~@@TUg+v# zGb*R76$T5eJ`Vv+%H1n$BI=16&z%xMlJXP00CjPgbG}x2pdDF88m%%+xo{}f(X+p> z$wKP~AO8LGXGd&11zDD;hCV|j0y=&q+wXpUL{RZB@f?}V$lMUp2{A)ncSLD_*>TQQ z+4#}uD^$f*QtCjY4wm>eS3?f#JF=37bnU(>o-aQuqVvMc^bu)W^XN~h&y1`g9e{RW zzj%W=-An|pMsosbiC+VkAIK8Mu&MKTCb)=Qi0V4c#qR z*b4~|ShGT{JN+oGsk18Zg8IULdxDf8JLz{KZo|4vK4!%+zBeuN=+RP0O%ovP+a%xQ z7=Y7EvBJDQQN{M=aL+wwKksE>%9YTwH5e7o1Wc|c<$O0}do#L{u*2f;^SVu5bxkcu zzm$J`3S)1#{oa`n0#;EY1%7%MfFS<0&~+nhVz68UzAI1Yc3v=kz$7Vus|otD(PI5R z*D|K%Z<4lXUnCIHP~&_Zb**U*#23!NeDpvoKfmO)pkbaGCqcR8Vx_)5MxCWz9)$>z zrLS(v-%tm+R6?);splgU0^>@BXLF0ezv0Ma{140SE|&39m2Jj|l-_ilm*M)tR#585 zL!&)RtT4}LbbE6@P6JGTKLsr^lEt`}qm-q$4qh!9ER3~EbqkxK$m2ZE)3$Q)qfB6^ zw0QBJLCfjn-4C~HQ9GpMk00_xh9@b(vu6kcN+-gKxuEdmJd!8MA4*a**;Y50_C+6Z z-{Y(s(O=zN<|@t#$wdvPziASqx`|Y}c6R(5=gg0f47T1M%T_smnk_Uk<^*N5QC3h2 zR7Y=?3j;Q+@zQ03Eq;Abkl+e+VHc)e*Yl!tDxaq;FQq+*sGdYyySj%2SZ7eJnYK^G zt|M_%t⪻d7LHJS3k{+=rHl2pA0o4VymgL2=St-ubY)^xO}ofiO`ol_~sqDOJy2x z9hjF)Y3i_O`fclfIGa3%!eY208LDee6#5rWOuK>+^D?488pp z`g+-E7vR?pUaGtmUGKqbchyF_?u)>to3imX{fe|c)cogv@_M72c=aS5pSg^^j-hL7 z|CpJ?7LCIQ?Gx|XkM|NP%^}6SVjCBgbx4TmASt99YW_fYlG%K&b8Xb7cuqh5yU*uFzbQCK8tEI{lA;P#Hp_?Okh}PRNH?aecWy5ZN6f3;r4OOUHH7{5l6-|w??5%x= zNBe#Np=l!Hv{FD1TH5FsTB~{wu`KK0nA%__y_T=*$>a~;R!grsbAyYH_9V8xu3OXu zqPz-ks`kgC7xP14$nq<6>NqxCHk*SQTOtysZ&~h+j&5Ss962dCgaG{GsYRmT)$Qe1 zqaSa722K}t&#MttiORTi1tK>UW#Ns4Dc<7Aj`p40sI|A`w~jG7X}MQqRssX9L9if8 zBcAyK!H?_j;@6+=W(so-HY@!W5F2_hG{A$wW?g@AzusSp4b=f(wTYc@JkN2>jU4V8j#*8no@ZHesN%JQ94^Z z(}2f0pkWJ$n#Q#1-un>pRXt+ydLz4h0fkCSQd;iS{>lBk`t8jEg)QgewTyXFXplU! z>{-}nO{SE#3Sn|x@SV=B-4SxyI3+sxu7y{yVPSNSf?Ss2!CB9XodB!Qd%h>E@YmW8Gr-eb%)GtJdciF$;eDGVL>HQ|@$?#xWW4q&;?vOi zuDydgvGcg$d}mt|t?-m6Od^$I#Y#@x#c^_?r8+BoZ*s1W;YxSlI4=KQ&+w#B!uR*C zaQM6*Bo;<2iIaBR>Fz_ySnK;bR6B`(@d(kmn|%b3mEMz8hDOH>p|4=)-yj+9O~C_h z*2*DTXu+xLqI(AGudQj!U1isRXmZjX6Pe0ZgK*%Cs`>`vsKck@Rc?~dD_&o~X#no-GhtPwZ=}gvfag`LNMu=dC5aK|0H}q3R(!tD z8J_!@nH>$LIWN|2J3%^2rmzDT-NxO=*W-CljfD1tkLy#kRL;@&}^IY!Nee^VD*W?GNh5racEuK9?UW z=L|npBV2rOf3h3R0;(n0tRA+1CO^#C;?c5r+o^VevgX$7-J_ZT`^S_q;voH8F5>(Z z;$>xb5m3Yk$3dAYSt!(A6UG;rJ}(#CfC6qm$q;w&7h0^tk*H9vRNj4=Tzr4a<-O<^ zWp7w}!Zm;}NF;K6-M*&t)-!ul!k~1i-WojZkTf9ofyBdUTF;L|B(JeBdLC7Mhkx3A*}8lF4i|!TC-}SHzQ5u12e>$~f*- zf&4&N@A^&V`SOIwW#njo#vgq@ANo91V2b6Cwn#u)B}F*F&rNxZXveparo)sv^ zPG1eB*oqi5O#1P!FCyGdcOICRN@@2R+J51KP-t$5YsVVlBE>MF8y|zF&u~n6aQhrS zYh6`Jv0moA7Y9a{5~toTxTtFdEz?wuJi+99%a!2;bUWGOpO2A$qo6;vGAdH}V8p_? 
z3rJsXzEPUJRUfSyVV(Zfell>Y{Bp{_F(hy%F z^PsvUIOkYNCE;h_&ih@GAB>QRfpG+hG&u!{hyMa znbvB!X5zPhs^>2lFw`ExF;41w(?9oeJL4m4Src*bOWIHAJ3J*KPH9_i?Zrj<;is-} zzJORtqn``#A_lkNuundsq3xj~!_6oNMuRjEkSYq4r5=Cu;F*tTTVz=Z5w4T`u52jM z39n2gLyz|Z zfAhsr)nf-oo!OoT5aUHLWydvNvS>)Yc+V)gqCKSi^ZBZn5s zW9waTyuu%Z9yRFrNmnWlCT{`8pS)~x0<4L_bdE>B$_2-e1bqt-W3!H9b)0n8A}O|F z<`fivgX-{pJlyl@-eHjbgXL9m`%{ zMP83}Y6}A2jvlM$vMquG6CO&>#^;0_GZ+^aNBpgk;_lBAh-As#TG_!{c~mgMC_gqw9^z)mq>3W!hpc zT02v?qol|;9&Gj|69ZRBd94T&ISTdTavB;)o%K|ZB=5RQIv34r=~wXixj2z>Eo^ar z9NlTirnP`*a+IY>k6QW`isO8>D>2JB88pu<$YD#jLP#y8r6BA;A;rOWW_XpGJgYMJK`clw)oG6&9@JE1~KpEw4LQJ>RQvMr61Z?lnx^Ngd)OFw$gqsatt zDPV-Ct@e1wpx&@+Iw$wKf1yoC*`)E#<>L^TLxJPNALJky2!=y2RBBZn9Sw453Rxuu%LW{%=suW%t z2_%SUojOcEu>*x_AZ0?Zn{Ewqc5+SA?L3Q|LmMO*eYpGhyH8#jdW&audI}rYEN3<;Og}K*k&78KIXeq7Lpq0!E*Tl$e*LLU zjS$3DKxa0%09`V=X?XH z(o4RK3&XBZ%xS(W6eeANBk>ps#_{R$<1e!!pw~G|c@i3#TpDS%bS;0u@fYdr6l|AA z#4uYRle9O?B+-Q`^y{5hmV%5H?TP*4_iMgQ%{Nb@LKrq4$CXjgB-}{cL9}6*n@4aO zfn^jbNf0ug@}w5J`5Xxq!6FM)$#tv-4O#J{>Hk8aOTP<;@qv|p2z$mA z!QHl^ef?v~$Q1(Ngd@!bp=Vq7u&M788Hv7Sjgd6P1SEBs;@F(#Yv{&A!Tjmcx~&pe zmJ17H){2E#d11eQR$_pz#1TqL!y24TVOlv3*rv8jHrR>n0d{@tuYC857WFp3OKouaiORUiTGZa&QH`#{df=+j3nlIu!=qgBQMnH@z?Cc5 zvy;tYR!IdT24!scFGdbY(bCWgudh}G)JQLE>%eHw!_3z0`p;DV_&VBoQwYe|NVh=Z6^wtcnkd`TjU3E3J3?=62|4v9@)Y>DQKe zZ**5YG_v#1sZT)a;1vl>)doPIISK?Qv3fEGMr09B@usa{UiRbfr&d!rEF$oOWV=#5t^OPS9Go$=26bu=3t8zHDx* zj2tWge3Z^i#LClclz~RLOeP4NxW-$n*p%zJRLN^&JMmZ;Y2PjPwzzF*1X^xV? z;Y%)m+Hc9Btr)!Q4}*Ux?I8UaCCYC{9RS(_RuXBUgo4EYs8D!GpF?Bq*(TqL^XG^5 zroC^OBEq1B`Jl!Z*`~7<>1P`YDg^?(b$TQ^uh8oHj7}b0*xZPQ?9W1K*M>=tM?{s1F{*p9(<`DnI#4fSP4y7k{*yMxcv?HEl||7zQ+1m z-V25NSlW~27XquZNz>_EZGRWo!W*PaR2kqI^U>hzWxL_IWTWdos=n=q9d^DF6}QQM zginn42!=!ztLNyTv&m?kkUfI^kv3mjB&|;{ki;@RPzri})2f<*tq_~P_WPK2!h#UY zyz^r3-jnS6CfnGqkh6?>e|4}BF8yFul{B^OT{nT)@6Sf05b1kS~vO@cwxzyo2|0dRSM?Uy?rwP+`Q*Ihq@ zv;>M)`ht~Jm#>{>wxaH3RF*qLaOI^reavM_+=;TZlvWQ0Ej$)%6Fr^8}!qS3kaF3y{+O>l5hlnSd+O|a&H5? 
z{+>J-U%C`Q7^5((KjWO=99_XyF%V2&V=75Y=rW*`jw&`Z4DD55q zx1~G6-;%)-sdSlTBf!rN zMmtu%J&@)xb=vU)1RYC%I3w?Pua!sHJm_c;d09RQ3vgc-w66zx(G5+38w&Ri5ebKz z@fcpI^_P~fMPR+$y0-piN&m67$5IKWGMtH5g3qnlI#Q>du2oFa;Lrsb=*dLZfsS!; z$aG4G0{|BJv2TZYVVaE;i#Sh|AT9#I^e>dV2azt2;-|606OkK#B8%R z&_J*_7;}`F+t9#;hfD9CpCNqW6eu~S)4lSeQMiJlNHvr3V}=)PXmDb7AZW!m4x0vu^36p1j60 zJ}m0hff{nQHre1!K)j)VMd4@yucp1Op~+DdHP>Tf&|tUkP`vVlRwdlwAS|mbD2bxf zE&0u%CJuoiruKzYihQ8pdM_^g{B)V`AWOwGqJAr_I|clI>23x)OPoulNA5Vu`5qi{ zq4wscu3ey}VC-!$pnwc@Dr^BhSHLOjIz7fh3B22}m%@a|n1|9dZf;T7@9;085-Z3Y z!T@YRghF0{PU)_TErS`)os1({aSJIrRyi{eT#yp=qgTrVb;?Wf`qiW4E*AQ%tCUS$ zA#~u-iVO~alnM!0EJM~vlvB_|M+PrLx>yVF_lx&ygA};DsuA9A!T;g|Uwh+%b19X6 zFBiy(;6T}&I--g@AN$m?F3Q-#zym&J5yIOV92ie@gxF7_19Ko79vJUHlpeylwFpE2 z*_g_#guYY^FglNZ03a5IWFmfa%Y=GQMgOlYl# z4IMHa$l~}cV!5~@OFj&wW_`feS5PV|BU;ktq_|x#Dj_hf%a$eYJ2<-<@W#{5X8Vp$}Mj4Qg zXo%Q)%dGmG*vxy%P#U|8~{r|w7(sWZJh%Tsxu#V3<8DGqZOL4B$3A~B}8th zP3Mb-Uo7BJJpgV(YfOJKz(5YAbrOXw8GIQAK^u!VF=@rxUl;7(xO*7o#%AwbZ#+Ap zfBJ$2+FIMK?w4%1!sFWya%O`w3G4#?7+Q@5(`d_qkh17SxJ?aiGJI&Ba#jXK5}pD1 zpEMA;gA2arF&yt)NHKJ(5J`Pbo7({6LMBQA=vPMEzn23;+dOI*qrBb9qB2Q3$Tnfh z8W>6MlrVM3lrT}~ap?CL;X9CFH2zBGe-I-;98#ZYg)|l^;{YC&PFraNQELRQ6a`1x~ z(Yp~DnARl)G-#toT#}T9fBjA1PqU|9!!Jn3NYP5DzSLmu+e`4muI}aQcDy=p z6QXZhWz1U}@;V7mMDyHr71+F2C@kQM$exCLVKH5b&WmLrMLO^o_J9)gPT={g1SuHw z;F<4|Kocnly=Db{ib@so&7728kpOT@FGmSS6f3AZz2h*S4^x7sXV^}Qe=^^rl7(N2 zj_&d8L~uXV4QE(&skoW2C4~CW%H|Yj8Z8R^WAa$w$Q=5>ht;u>IQdw@#qfrQ9U@2>_tB%=Jy`((3ff z=j(wM$Kc-Yw@4zL%4$(&e+auN>f8$qvNsy?)AzT#P0MMw3WN;}Z{&sRduK{K2WW4e zm3QH+1rKo~ij^Hy0LVOe9*SmgbYJihtB(MQ&T}uTjsdTyE^RD=K+mG~VbZ^uQN8)m zWOsakf=z6_BbjBi7)WFka)LY=AP-_?Uo|*{oK>%1JP*`z>T^0Sf2X4{n|yRDgts{7a8043Kd7s-2|cHPYHIg*-0cZl$R z`yuime{xrv($6+RE4kN`+bHudX^(<>PeY>M?)C#=FOdC=80(fQ`cbFx(7|BOvPA7 z3DWc9CzturBc8^}sH+L@?1uCpWCdM$-T-?&3{>y)W%EjdL4Ede*J@^J{aC@-wf}B( zVO??xv3o%XWy4`|WNG!n?1XGj=CALs}9z^lT5*p8Eos4dO zHGLwt8&uc9)`7H|3xz@e*6+ZiIf*Y-RP8xVe*C5rf1j-(USk*btHa}MRE%vneQJcz zn{icxRRhv?fr3t3qV(%xKl+)pxNRM9kdL1iaPJ1Xo_<*aT6qdE&9P2@gm<5QOQ47> zAaYuh-Mt*g*hJZ;5E^kZRx7HhGXJp89c`>~J+xTuhcPA**Y@wJS)dU~f7q%mpAILW zAt(x;e@-jtp74J6u^TDyNWZczt{$B%Jfo)+Z8LxsdEee_pHm|q4t`kpYh`#7Uze^~ zetZfD2D0_$UUt#0NJaL4p;0rxUkdF)Bpa@Tj+0tq!jy(p|U^fBzsV87yio9-s`yZ1FRPmASIpv`@sT zi+3xz%@`=AYYzj-kU|dCgGPL5mfujdqG(Ndburj2_x&lpwWouOK4q(ROEE14c%|~< z6DB-eSPz+S2V_hM4W(uvom`a45Sbg9sz{h-ruLYu$#^y6KQGGup?Tlp?;=2`P0 zf1g|{G5;vmWWv2j%2KA}yBU;N(xQt10v!ILPFPaeJa^BjVmTBFi;w?oj9H|p)o7Je zuJ%Ssp?8KM&P5FD+vXJr!-hwT$G=zWXQHhQV$y;U6D)xmQ2ml7VkL5lG2n|}B`bbq z6%tFuX4TzgQ=D)K4ny5)MQOO4wH;AUe~gADlr)H?K6Mx%v+F0zc?gp$b-_Ud0mc-c z8=fp98+Pc;FZUi5Y$ByH`Bc!a@h#=8va@!2&Ic}nWkk~I-?9cHDS6!!Rh!VDO_#E| zSx*o5I2j_S4QK(IiFnIZVR4oJksK+jE$TzU%E>1mWt6BdTJzjKRCM$sMihM&f0hYST{w5 z<ZyQ?|h1@qse{Ntzeidj*$rKW;(SR(9mde}fIFLRl2wl3XksI!( z&U|Mz$+F%`@yWBY`v`MWC>AE`B&s_0WEo@CgL%t%mCSa+Pd0f2P@_Y`%k=7s?!L|B ziZm}v!1L&SB&>+Tmo_x)=tWIHmbHGGp3xY9Tz({}iba1r-uJBK8(PnQe}((w0T(UQ zWF_t4+9?#geO~<(VAzO_Hsb}NuOv!=*~{E!aY8|<*?@Y6d6z$Tlf4gfW1g=xG*lb9 ztQl6X;VG{lp-(>7?h1)IyoPM^PNAaugmwETesNzX1|9Gp^4JGuY31nAwU6b89v4-^ z)eQK*bh>;%qo$t-p+8aze^422tKM0h>q4;ee8A9zCDU4g0%YVXCdFhTesftk-V$+< zw}7yt(zhw#4EQGD7hEdbpdh}G>da4=Jm{?@J~iO?^rltE2)zWA=r*+H>|d12r~nZ) zl1*uOdmTcz-)ybOf57rjJ!5-x38}k-VYjC|)63q;*Pl$~g)}at*nA=2=Snw)@<|zW zkh~vxoi)8Xz8!=J!@Ve@sEX08t$9zyPVK49QpE-+p;ay&2zrsZTUDV;F8R+yFHO2y;lx28hBmNac|unf*fXsPGlLEw5a_KXar= zBpf<2LB$Ru)sf?Vt^DEhiwx=YwXxq9GOUi;)D;j#e*xJlZF5a@f@aJYY&a7{&N91S zoGnAn74^_@{}LEs!J2eolB<)rN770PBiKZ&inaDrOlLaLfwL$0L=LL>Y1=jaSqI9S z?m_16@X#n!H%$Dr0lT03NUsm$Es)jbWo4N&9*1=IeD7*!Wtm 
zc@9+t2n446>HBL-jgO?c6v?i=h@1z|0N$e%I{JB^7J6ka7RtpIo;hG@w{!WKUmBW_ zGiAw}{*F>FIYSSk=!O3I@cq&hf3Vx@q?28Vxpjl1I6=V=l@ueKS>ABss#gEJ68PlC zp39?~OP=ijV`d1p?0aWDQBClDV$Lbx2*nHDWqC-dcX1awPArtW(iLy2N++LSZdXJ1 zCw`;Ul}-Ti6LWx;f(nG50LXNjQ@!MYSZYQXgVp?`)na0i4;K~XrH{ZYf2zS%E>uE# zs3&|UDEQhC&jHE>&u+#U;KwM@52}%#eI1D+p9n?-(S!Ot?Bh7opszXwXL;}KK^g_J z5xrf@u>%nhv+dpi?|lNUV5po|kNU?yRcqX29F5Td@KUb1(uD@(TFx=!>PRO@^A#_v z$y%&+2X-J@mmMZk(onLze?)9FLwlebL#(}5h`gcgqtgkP3p@w;4M%d=Y!*(7uBYTE z1KCB%SD0|tW~X1gPU981COFSOpHh~ zYP9U`jVKPcX^i%R?VeMw=yVWR@*K)%a5kE2dbi0~Z#D6+lt5=d;)?s?hT&YhP1aq4 z@nS?;qcle-8IGujf3`@i%Q8zL7xiqVOu?12S)?kMq}kSwx$9U0;4=164rX zMjUg1uU?d(9=z4I=Eh^O&+%jR;upeJ;Tg&}ln6p9XJ-gMe*^Q!@w=;WySQ2W2nSyH z?z!)y6?S`R=Xo4-7z8O9{p6lziw6~1DoIu(Sy3N3dji$X5KK!ddP5)Za7pFQw%cI^ z{Q(it6p@xK%1uHar|O$>4dPKa;zRA^-UQD+)eA|rYI)N41aU6ndlr~XKd}Lfq7u?f zv|X(-WY^z{f37g44=Pp^?QNt~A{eEJQ)PwHbP{T;T+L2FkqEbr(%jzwH1<&8Wgp7N z<{l+i4rPL@FLe^S+ZG~U1XOTX%Nr{7!FB=RTf&X zX8Gvi>7-5HrAYkGh&V{qZE|}MKqz>k4D(+xsB4Dwe|LS~HhQ4x`BM4=vn0~`aRE57 zPbbibhF2UOm?Cw=vchf5(vPe9D_a?$4+2_LCc-sC#{$B#nsWnnau@pbwIar{er(7K z@-J8(gup71@JWyzCIN<-Uw4)#vbq^}jb3q`dsit=00V<6QRYRNQ|R@WRiWoQE$|x* z;`6k|e@t2)(dW@dLwm)NLJC4dB=up6^@g>ZkyT5-7G7yptfFk>cnzw>azf?r{3)Q6 z5WT-^>jVmudKC2xJ0B?YVHU{Q=I;`*brZUu?Fxqz+yS@HUBay+ex-40X7piU(9W1*`XdZ zz`MxifF`)LZar_6`h9i1O6-6!n4tZ!xR-T45cl?SM5Lo?5MzW;+@cDx_RE9JD>6BC zf1ZbiHAaj-w$5W`TcAm-_u`$!NaVC6=kSnWN6tBO_161Y@;{Pi!=}~M1sgh4pY!$^ zD(ymw=Hua1w7`sS@$;~HXI#p@T3VlCfV6Y#W;m~KV#yVV_;|E1$L5VKQllys*H7S7 z7m;BNp|~mF;kYb$j-DXMerz9-Do=e)e}3B2jALdJ<78?Pwn)Zp?J`W9zdW#BhapBY z*EKwQb7g47$(d*NZ8A=7h)s)CBsZaH~)L7M>Q4$GHv_^>*nj2UWkmg`-o zEYr-7sSmW-jOdXvt=)csO(`2=X|@vYizAT8O!R#ilEbAtOy z3aRv*f~HZ>->Mwr^|_)8*Qmawd%>A|$QceaXEwe?et6~=^S=@b%Sq-RExc4ROczg4 zu-ZMCMD~*3ygz58qu1x_C`SD5f6Q1{U*HxRa`l|V6gTAiEpq*;!$mtV0Wxt^ zsJ(2$dH-toc(l1wf18)xS-Ur9 z+&p(y66oaam0~4@gUYSTWRXAHYuICe>{Ac{lrjuPu^nANrXA0J>jJ3AS}c?N@yCxp z-0cFYrSJjEb14xmTB4q$-2Rm=@Z4{jL_tr}X?Y}9NxbcrAF*b~uIG;+Y%t|-c>vB^ znj>VA{BTBV>2@feL&!%Yf5=^nIOh}_UO|BVc8o^G8#^J?XjG6-R`?BEWO{F%PoLfc z4)<(U2bwkeXvd5IE)5k*<-xnAYHGu=i;J1WEMcv#+>eH0f@_ z+pmybP3=JIu3;c+u0bE?z^5)KRMBQD9BtH>B`4zeU1`67zG@cqe~Od3Ijmb0InMH> zC$B6U&#WZT0LN?EShCN7Y<1i$1n9&DPXk1%J)EI`N{x5{vD?{7obu6ag1jfioIm68 z2Xcx-U`A)d7fB;YH^`8)PQ(yXJchr(i$ENA6s?X(3={3dD$2$ZU362Ymh`A>)AZon=FX=fhzs z=WL=(wirc7HgX;%z<1I2vkv;mj@M!A_Z;$=YBz3Rf%%W%T7pAB{NCFX4cni9zIgdE8zAq6w`_$)-#B0AzerlBWmeCBoR6&-?p(wzrsbZ`}_S`_QJINoiCmcZB2 zm3HR03Qhhh3@%8upA>}~yvCsvc{r7e#b%(mr?;_#lKx;86Lm&Zqn%v9U%OGmcQuci zh?>G`sSrDFy`Vz+TgGGWgp>m>TZv)a-kYs(>MQ=Xe~o19EyJY76*9Sac;oWgSH!5d zy)H*2tnA|yr8Q+cR+N0)Gl@yjvcwV{#32y03RJLmZr z@Q>yEf4jE}W7z~6f1fjD0S4qDUuWu2#tP%b#fT=hhgIRwT9?h!*geoOviRiAJ4-u} z{WAB^>@)|Ev45wZTUpD%F6+=r7C~S)Rk*j7!@WBzRa`>R2ozsrr$jm`%`RW={^5`i z9wELU>Yb$t4Ci@`X14z>`ccG>?!LdW*hm|~e^_1BkN1&D)`(j%9N!eJpd~z>JK)`_ zUvOXe`21v?7$X_C3E^zudk{Nvlo?lYSA*$2${{KZmyhZLOFFePjQ#ve@ECFbMC|^R zYZe`%5MdT%Kp$$+Bv=<6ew_68V`^xsTVaMh} zmbHMhJk|oeYzUdPpvKeL*&A%VS$o8Kmeq&X*^Qv^0WA+xuq&MoLv{Ku5e}MwPs^t3 zz6bbJ!9lk4lJa19hJAI{!cDfF;w+CRk3V$XkE0Cx$?TQm!G@4(@$ot>hGQ>e;$vfa*`eq?>os_v}kM8O$`cnhShMGz*49-l|QF3 zL4O)^XhG3t`frq_k@IzJ8HUn#^>Vp34O)CghBAY^PZ(DKFBIz6*OiBA#Y20Uwzq(VybJ`reL3$9c~OL|eRuMHe7nfCOfBs$C9<(G@Wbd)-b&J?#I{Z;0+(oHv*4sBj5M&*X+uhlp zmh(AZ6@)^$oiP3yF*ZBE)~sJwUv2WX zt!R3|=iFAJB)%8!&F&p4Cx1EO$NizY8sswAJd_9IFF%KCcqtuUNlci$A6Px-kF7x9 zO2wl^%ykEdzIzX;U#`b0_G7Aa6#V<(Zsm7`p$%VScNO=dgII?fT{P+ERe8M^li_(6 zV!2>K&CE*k3h*-oe{-6iC@5sV7!nq!XiN|T*5Ag6?@&Xm30lJJnDFj-y2p2X({JNe zd{(wMzp)gP+dPYy)rCp)Zip75OlAUd*OHRM%Qi1=9jBus+tszDd0UoAP?_6>>QH4X 
zh#!v2uk6cK9yZ}^WW%yMldv{Ot;`P*^~KV*+KvLXiM_XNe;A-h0PQ?Vn*bsh-$y>sE^`&CeD>t5Xn@P!( z#Qcq0P?g_V;Z5P128={YhDYv8K7W7W+WDe*EM;w#q+%_l-FFfQD`Rirkuj>)T+^hx z+dgp}_rK4df8LfSY!y@D#u{#lxb*xa32u2A&saoAK;$b|*b>*P-amox?6X&*X-YVQ z*102FpaeoHME zwWeP39Mo+E67UaS*({FU=Zh&cYlEXodGIlop2jZ9f7DmT&G`oLI5HYbdr_{=Z6f6q zrAc&V(co@*ZC7ob>%=`H2s+CEE+takRqzxIW%Zar&9KnFwRnDtD)t$)vG6af?nSfiR8irG=}Ov%4;layn=KM z)datBf1!###FmT$8k(`{C7#VIUvdd@>b}i^)uPwWeyv6G6o1CU=`IhYN(b5)pt-A~ zi?^QU6SL^Dzg}eEYz>+qqpagJPWN13DE{S(bSSqtLWJW%5Ym#Rr0Y$ z;kBymX=-MU>~Bv>91kSDG(TaWqU>>FStGHN&Xedo~JxAe_s98f-mFS5AIqzwK^8Weo7B@0OQE%A%Wln zOLkpeV;j*9NNABh-CnZ(T+kuOB!3k5Gfjk3la=m(PAPx0&d-!mPg5-Hn0|NV(>e`S zBdUChcp}a~)385G54k;r!TYj6Am#~GzECUH3hv(WaeCgDAMA<^>c*&0a)S+8f7@(= zskJ5y7aU;#%igS-is;y(Fp>WU!2WN{<9|rv|MkEB>;DH8|94KY^Zz)--+ws8@BKgf zeE&lzw*8m-^RKY~7OAY{+CLOBpebpa}o-F3WtAQHu;~I_a}F?j;9g~l?=n!^xw;i z)n5zGzL8bodxn^QsR6^kk^cr2l&(nUbcCBy{u=}T7f<=WNwUUG3fU+gf60$w+~zNy z^siGs08zy}cZQk}!?61CPfGKrLY5-BWgHJ>-{jx-`!^CZ{ylVcxNA)+&DALHFqWXv z-CE6U2Y(gg_6d;%IS4l#kIVRYrYxz9->KW!sn7WAmc~7JOwJ^$Ec(QpHiQ8r3fvV= zQgFii>^rkVU-Q+lK@wA5f9)F#R?u)!0r`aB66M$WKN9va7$F9NpkVX}wmGVhNt_<3 zoBF$t*$FGD2^D_B9KDUi)oLcd+zaN^{R=PnTda*fvKvjKLO+hO7z6lOZ}i@RewP@$@vo zQ9ebuNBFwpuv6J2fR3y-^8J%pBH!2cI~JdpS6q?Pk9BQ{5S%40t2nm9AR;`#qrYf` zS{s5yLP)~{{X1!Ne{0I!-!}Hw+VypYP1I`qaT3aTa^=O^YV@549D9N@HxF=fWX&K4%BT1%bL%Iw3PeZiFl`2@YC@B?HzP?<{gYDFci$KD^3q={!hD!X{1r{(jBW zM3!x*3h}1yDKNME7zSv{_X8Ymqsrg%!w?Cy>0_L-S>c$Ee~-zl^k{oMU2!O&k1)FG zzJZ@fQmV=4IkWu8$xzA36kRCGla?kdmZl*phcEV$KB~23zKvbemWY(3&FXt96Tz92oS`0B|^$(I7Pa&K&- zD>VU{UzSEaVrIoW_Fs5(ib7E5+GbDuZPt>kX+MZ)iPa(dH}P2mVo+Jq^RncPvD`Vw zxEO)hE2QV*w&2h+zDNO8<4)oHWgop@OrUzd9Itugf0oC6-o8cV2lNrLu#)k*C<{rQ z`bIiJZ$wtYH9~H==M)29n#AseJi#GHc68U^6t=T1ttuyr13jcp-6vv{D^Yof_bbF< z5;xOMJ10EAn$X)Q94xlDaMaa|<|b;O6NRnI$F^DdxsA}sxstPRegU0$Gbd&9T@Rp0 z=PTEXe+jmn8My`vrX81+?aCPROPdkIlFk{n$LFB}yG{@X6xXTrKy^q>_dg_>acO0v zfNSS!5ibYCTa=Vx>I^Hw*AA9I1>Bs1b00*LMNdI+Q;1B7!fhBkbUDe}s3+tLET1zR zM*#Ui!@^KQbez?$P!=wM6Bne?z(89hs01cke>w%0HQF{w&Gc7bsS!$qll6?~L8Q-5 zvj~uiB~YgYb&2~bTA4fn^G`3#FHHA`(6UPKEpIc3h|oLaTKnlIU6a(#8!n($a7C93 z5kN8GSPI2^M<;9L6~~ukRc}C?cKm)lSZgKQ1NNcdpuR_SAN{Ol)Xlt!H`Panr>AFl ze-Y>y8P2tJ{AIaQ%`y6DI!%Q#!7fZ30oB5prCh2FChD}3-%bhC3=$RfLQ|3IGyp-O zAX6$p;n>JEzXPhi?K?nA`am#zjL&4G$gcpavq5Mbi>qVB(bO!VTBSLU-fzGBbEdUh5P8+zAb$h{jo)j$y3X1@3R9*Gu zTVRCQ$i@HUB>!}8c$euXgywG7BQ7}4lu$Jgai~;GuF~)9Dg2q5v%~EK4U`uKRjq=W zs?=B_5C{)U;6}Xbf1}A6zR1d?CHd}p2-M#{h7MLTK*mp3!g~hY{>I)7q|qnP+3Sy7(o?uKtB!0V8DUg&giAZOoTb2ktFp_?Ur0 zr`6A*9eiTwuD>~e&LxVj{Q%`Mcl}3MZ^IZ}CD7w}3#fD=u@vFhTsDq6d%LCpIHEsw z(Tc9RUC#@`U4Wl*7Jkwuj6k~@xxg<(gSPEpBAgA7bF01rm*)tz|o zk~R$ofXNVk_}ud#z{k1Ohm4aB4xqTqI0p1N++G>Fft(~XY?9!`El|BnDA@NNG}%t{t;MQb6+P?G1ll-?#XxQwE-~b{GfAMo@BpEk)JwXe<+K)ji&p{8Q|7riiC|DFEJA~Dx4^&nE(Xxd@qEXn<(dvw5S8iX9;_EuP@c(pHaHNhJ&e* zc*A84*RFQffI~$|@HI)Ze|?1%jOP%VCgIVC;z$&(W0anfff|8?}fk7NQRqG;BIas>;eJT611HDx?H$-k1KX;%rx&6Kj?2 zz#FIxmezm=YGUyDs%dFGx`_-!N>(8z9SwFWk%VsRSe>s5(o=oXf5pliLEDh1N=*oLa07+Gc#J7@`0S;4$V39s`JlDZ>#-GQO^poO8O zLK0BbFBjdy^h%+fyMNLU@Oi3~;ew!cqIFy|em-|1@>$v5e@N~VH&5)a319>PPl+>cF(PIf4B<%B?7>J=(l0mskSC%^GUM9mSt0iV zg$Sh*fAzZo)o*{ZiWN)74?nkkrhIt8y=P+F^268~ERO<%oQJ zSl|2X4k*-+e=o1I*pp0PIkglcKTXb@T6=rOiTV17SJb2EU5|cRH%{w$tarL0PQN4Q zmxGo!deqFXI0{tV^HNlRMWuKW<@m9f3WA_mHp~tF{cXM{y6=)U{9WW z1N-ZRa>)(iUZ^8-6|5d6!7_G8$iYKMtT$?DGP;XDK2eZgLe0gz(LDdG#t3mWm9Zd5 zU@%u8iByDp`0!`MSUwjB2Hw2}e^*CGMno}IMOV=VKT7<8Xz*}rSCpj~)~_D5O*uuJRf({B*4Icc1&;6m4O^fb zV<%Rm8C44WOus9Fhx>q1S5HT|V2n~|+``yuTc2Zi7q*?jNzO(L@a6XSlPgrBbe}r<-vFM%7dTu=_{4RggSEm z$*nnrgmn}Hz`kMnx^mT^)T>RX=eA%TupzI-zf-ieWS?3Gx(pk>3@hQ_`BIk8af 
z56g)VyxK_i+^9>$o*45a|Ljgp|7@28(rC*%oCTbrLseZ4 zmII5&g9D~vyc9^{OdTJe577lL`(WlPZiOY8#EH@S1AK1z6}(fUE2`{8A1*!(ae12D zM1liRG@uCEOB!zudoos_q)NU4n&rVuDl(|NUMm-{bumlmP$INz{+%m?f9k=$f=dlb zf`gc*dNj#noi%4YlC18N!&U13bJ8a~Fmp=T1E69awGbj~*yVoHHf(MXu)^jKP>IT! zK^q=ZtRyslwbprN=nYzL4F{59Z@NiCX62*ptD z4IfIbIwn|QkE$>}FP-fSl%{ZkX?3~#37>m6@6do)aA@}g-Yt$)e@h+oe0+u;-T4~` zqs#F*&MIU5)(j>hVqkYAlm=_K0#C65=N&xjEyF3p7PxUU<@pzAp^u3t0+wU}o zlNY1!+qucLA&ZYt+$#0u%2u8YRfjCoYf~1V%IC?iiy~f&SHjs~Z@gAjch`<0mNn$! z5qddO0b^p+3Lp_Y!nsOfVe7TO?8w{m1ea(x5!X)L5%60If1(Z6D@~Wr&HFpek;zyH zGOyMXp3~+xBazzRhyj=Z>fz#20+Je20=Ii7T2=4wcWehndC6!(rau5aXumcZ*I4iM zhwnX@dhhK5x7vq&NFZpmrOb}`TN0!`MnuLdrtk(MZZfycf~@Z^Gw1jF5msx^xgjCh zk|VCkY}hX)f7ur4X4%t_$TFD65t3C_A&3qA@kvBCTT=VP@dC(@G)z;Vy@9^enk}(V@?qf4IMTfYXR*o#eH{9ljp}a=oKktDV5Xg=*41tGcb{~Lm zSg;uUf6`N1wo9dj++JW;)*tFbO?E!#cvWQNTHF1FYnrw&E5O$%A826VXPb8j!UH3J zu!afTe*9lNv z3AA3HVyiG9KpIfAx{s{*wvUchQ4;khwJ+Cm%AbavN<9xNikvO7j8gY??5RBZXzBacYRnQm#TXdMf ze>Jyc`9k(p913*BU!dr~M?rotAdBbr8DKyz*g>FPB>gG>j$V(dZGGiQYXKaJc<@Lm-~!Z$4?h>3M=8!$8%&{*v_<1gVN2P?i?uV?EorT& z!d>@n& z&typd8yXtkCO2_17OwQAO!gnP{^p+;1v--=p4WJvvAwWH@HlnI5)qEhS`TuxCcdlF z&IsYHcD(o7bPOX}B!8nz0TZ8kXB-@JBRe=_-t z7J)PAy&H5+eccN)J!N+HsuwU56k9QDbLbQB3yj&vO+BT|Owyr!k{x;gRVX;HT{7SF=&c{5!%8>asz=~1><~IzZ zx&nha3`Bg_^SK&%f2A1xhIdic=;gBn=3T)&XCWxmzP~+hKZ=>sy>4rt{T0@aV;PCh z052d1!4!wnlsur2-@LXleT{pYvEN^N`0tNG^+U(S?IH}H+-BPa{EijGnMHb2no)Kc z@!oY^rAnlkb0iTS*si3-FI7=J0rd7dv6f0C=6nZb!sob=3B;K_TF zN9}46X0)?%wxCh;@%%cnLFDjCxkC8j$0QkLK{7)6+a;)VB?{Q@VBc;DqMn}}qzco{ zc)hdNw_|9Z94BbXI!RNlsI$sxRfeEQim!1#7n5?gE@&m)7r?Q~dwR|1KN-%pz12Y4 zL@Vkq?ppRPe{s<7U4RVaU?CKTFQlL0MudT48+3rGIqqH+|u zgUP$?BT#%}mMxf66NSBG8yQ0$mR)j7UrR{5Z5cZDN}yMhpTWG#Ca<$2cEHYO)&l|n zuLLUxf6jC~U>C?{Ht%=kip1bXBKM3RbX!X?eIy3m z^&A?smNU`ds!Vs--+i4>^s$$IUy;b%wL_xJjV2Br+MdguBgz4|ljGmCQPxs^Esmfz z(mk2#uX6#dkD2l3LjxS^t;4Krm`xdps^FT$e^8>fIRi0N_9JclIq1D&LZOrpU>{xv zm=o?@ex3vc*oia~?jx@_$hFL2tyV3Q;IBR3?V ze^avh)ZV>}bMwgevMFqqb!+9|-KtdarZ%v`IPgt1cr)MPSEB51q^wYrSB&3C*N=F- zD(6DZGp-a*WaKu@3#(4^ZO}--=y%{@dTmmCO)e2+OZZ^vL4~Bg*83mtru#deJZ38N znUt@LeP(-l-xR{|$lO%WavYAl(RZxx z+RAJK6r~^qQ>VPs@{%hpYn9LQWGlcKXmDwg%&I_4%jhXbOvL15utX1lw-+KmofM(C;TuAPux%CqR&Nq+GocI#kxSB}!GkWKz*T7J=Agt5A9R&BCTN zFAf@kMPt~Zr9mlCbTH$%N1holmY#s;zfX0`vNY(IQrf%~B^8lH8iA7)0K-+`v8sf- zun~CiVTF(?RqHBBnhmFtqJ@4ve=jmC0YuE_$TO}_56(c<`LW%f*v4BZL{xeUltO9D zVbCf&;mF?LDdcHN<6?|dpj0y?C(d1qd|f7OD+Ynlpl%D7As@;VQsv`W_FF`|bJehg zIBKeaNjnO9bbt*-M?1g^+)PUQRVYrai7olOnRW~}8?KA#6hIk;zFh5sf6sX%!$9#| zh5TQWf)Zxb2dC$h?G;WV6XYVH zZFsfoV%ilcWW9XVYt6}gt*wb>u;`d!opOHAX~m7JG;L@$4SQuTgwh-7;vat%6WHc)b2_L&9f5*?#fr49-$J$3MsX=;u z4F&5-?Hxw*kqneq8P+!<&V|LUjolmHR_RSnInTk&Xpk%}sNK9#E40+`0BC8_!qHym zIu%=b-0He#z>u1%U5myg(^n@myy`qz2fh7xX$vEYnJ{zGS#6{nn* z{3>Q$6`^~dE%TT*{PH3LC?X`a&3EEPU&rPC!9FgXfAIVHjS#9DcJ*W(x>{=pTc_m8 zV6%pAWFvm_PH9&^w6Dt(J$vIM%|Y0eYCm^uf-+>z>%`+6*^|lM z`=5?NrSD3sclXSykSr5~tPSSnT3-dz%Lc*zSnj7daT!+7Q&smtg7&(KB-tyB!6{Sd zGPA5>EYhN6TW|@_X;{J7*E`Xaz~*pRT%;usXhk^mkAH7Ql`BtbN(94X+l-djv$^(e zRfD!`enkF2dj$$^hC?y4h&zqogOq5FP!Hn#aTsl3OGp`6jcey;M)?k%2Cf zfcs>)hY8S!`Q44X8Xoz(st|vCQGyBi#{nd5gVbakv}X8PYvN4%V8{g~ZF9r0nKHBU z;~8LnrGJjzt}-71B)pZtbst(-imv^6z`i?!TWT(cN-QLEQ7^IgZK7xPg&k(g?3&m! 
zoq7r79w8+VBtoTC4Co|E)5w{vbkNTFG8q}*iJDnst#U1Vt zXInpEx=R5o3}2fBq5$A1FZI-XR4Zjh!4mwl@DPwBfqTy&{Q_U#xlPwGB7AazMU-uZ z5$ZGzx`Fr&1}dY61-Ie^!H@t|9R3R7F@Jg_dW#QL%^kHtK=8#?rF9?n%R9EtGc+Y* z+qjF#%A~>7sk)R8C2Uit+e|GG6Rw%eL9yG+j5&;JaU6Tm!*o@0X#VUNsj45lNgcz5 zHyVr1E9qq4a&aFe$r%bZl7C6ME1+o2oD9ql1;5UV=(NZg1<>xfI7UkN!5P#S5P!v5 zXIb%(rUvgh?6;<64PSu@A1NzUQ9>joRoIg|O56`ADy(_84?x#^ol6)T<}~ z4y9Ndx8i}uPrNrrGuikCi=2Dac|biV0W0u~c_|CGVUk5)kJZz==@*)c;l~f!BB#gq z8Q|DC^ynCp7pWsz`$oSP%xT$2ln0(d#8nbH8zRRNeT-Dvb9k`wkjly!h<~&~P-vgt zCro4>W6wiWvPfs=1E|W?iv%|6fK=4@y9A_-z!IHBo07I`B>6qmgWHP@NK9(_A-k?( z6;n2<1F{pl4mBz0l?Gepcl^r1cBH3M`-4KHkTm$Z-egu3 zBv}3rWB;-BJgjRAq7S5ovMe{LoOA9eSUG3u>1%xdTKk+m>Q>|0!+#AW2;w9F86+`( z(un6nO!<(;5ZNUvf+uAtzFsQatcYjf5Tu1hHh+t}!nsOo&UvWU7J_9+c2DR6i3b;4 zf=fsFNh%a>ZTm0^O*fVy_%$uHDYG@dCx!*h^?v6?i^=o{Cbx zkep9--Em@B21!h!mnTU+`$3SLLJas>zPln5a)u(Yis02CLXGp<-_7tvXF^`RT_Z%6 zr=xhv0`7TdzVlB$^U0R-8S&dVeQvjTQj@Si8es+w$VWUvG=B$u@^I^$JIy%Tv#UiM zdop9a?A6?dPS3FX`FbH4F(6}~UUs3GNCj!v_3>oA<*MPiS%R^ZJXO}T`2F#uO`;#z z8edW6=M9kV5ygFfzX|+xz)e0#bt0s{UZ{GMW3`?i58nSqTV5Z0H81h3r;wlrrGBY!PxO2#bh5Q6wxGd(R3zjqn zo%9}I+$&MW=i{E4pVWY*8F78$TB?&(ENYhDepBI11Gw)pinuoX-jpG*?}yye2FR*jG4iqU^=`MH%7Z zh)$P~+{y0+?iaB!P)j15m0pL`_Z#@xI6bOXbc>cAqb%3W)94=IklN|^IO#<^MH$@Z z_v##hh<_`y^c#YQ^au$UMY6+c^^Mm+ajbkdH6Xuv=cXZKZxZS3zbcRswV~XYtd$53 zxdK|DdWT->wuyE+OXB!R+s=~wfvgH;Og;o6 zH3MqBOUgKFhYg>UV{NwKmEL$!_v%@#YoE|2eXz{)48K5OUDUX+`uXb1VX3X0`|JGb zIDcK^O<^ZZ;j)f6QL7oWGebZXomkSBoIdsGK+~|91|bIhGIPtfoZFW>*|Fc<)Ws#7 zDnDeEBfQdS5-LTCN*!5C#&g&*XCQwsnR@HHjlB1Er4UWkQA{<#!B5+_ktFwuP~?8* zl^i++Z+ZC+{-C4>>8^aZNkyf<4^56U?|)j6Q z8O{={GM6|hT=uW%ya(E5xAaIOD04C=Wa7oqCnITmj;^7X&NQk1)oJAW9RdhGBXssP z4PAb2Zi``&(w|Go1mYF?#g6<>GiKMd;WL7EK11aNPe%_;8>K&SySlvHIEWk~%4lHt&qLVc&$QDSt?pnl)E6!R$fjo|TQC`qKi3dyV4>25>t6-s1 zizNxiy3p2VAZvJeq9SqKmLwzo{ToVQ4=g&0@z+lsX?~12v6O^5IJBib(SL*x9!Wk! z{F#ajw4@}ZeKyw&E*$X;G{3M$)mB+?%BB?#Tw6fc5#~B{)-f5%(>B+bx6E2&)h2lU zav5>@NpDGtI(^JKKsE4N{$9ZXB_#&PzXO&htIzHtzDZrfubi6{F6sTVWCQjK?`nN; za(CtLw#Eg&%$t<;Tf6IR3V#r54FvK-tzL`I%CnI01*=TYN}0*O>I7hK|rL(RZyodXW(sM?S^Tt3~DLFweR zEF9&%hB%qJA$xz%LE8FRL$cNJl+KTR$X*P`J&55MD>z8~!HV*m5gsY$NBqLm*sj-lL*q}9N)KbMP= zpkOzAoNZ$XZViJ1Dt{f;&kzlBXN8UF$g0OvMvrPrHHcVxD*P0gl};KKIWQ{{imFw+ zZ{#DclKV>mt=Sm|tBm%OVm=EKGmo$XXvtI#Mn;0YL{{yf#oH2J&jTR3;+0 zgZmOp0-GldaJO4X*RpfCUxQZGiW+Tn^x)4p!#XAl~Bq^FncZ$K=XV;)#w{;6iuEhyM7>8VFnKhkW9j5A% z`JuEe!it!34n^5b?*#Ll2kirLHu>)qA&P8fDs9f8M1M?Hz)Y`5sX}pm8ljA}V0I6? z$D*0XU1464+OHSdx_=@v1>Y}DK*@qDcP3ypEV%cKku8so=hd4)!~puX$~VPlC;dR_ z(2&93D7_>~bf@YThx6ftKb8FP_a+!VbuSI!Yk&MzI@`GU_w=oz&bc4n{>#l>Gpne~ z<3oYrJa8UD7wqZ%73+jHi5eE$u13nB^TWWKBD;ktBvVFLfbSEk`$VRK`i|yLi%nT> zMUjlZ+1w5yIX(_}N-)a=N%m`7syMvGj<+uZ>6VnPR0XETqFP`i{`*ex-@@WGgD{D? 
zGk?Ap`cAJE5({Z4o8giq1TUI1xj9(Zhzlgv+7g!a>r3jXLjHmOLwgPf|2v+tHctw z`n%u?={bqv8F%=WZ64#4gdxR=i5;mXi=c?H8fwgjS#sVFY$Ou#rS?gqVxM?KIZ$v& z(Gi0!cAG((FKhheTDvLTSn@nAIC9H1-QLD#?V)(_xqMKX;Yv{T+&gOpWkPMiYkzC6 zELNSkwMc=4wSBQxI!G6;m-CizBV~744iNHGN1$0f-)v|Y*+aEph0xW3 zi7&=BVXY`u_~+!F2HQ-mE*0Od(x3X^0j0Nf=DLs*a^z|Y2Pq8aKFoJ6~bNmw8>x$n5fa=EJ6G7Ja87!(XLr6q7u zQ`XQYXF)I~riFyxZra+}Io;3CAm6%UWkFG0==AKz#I$MFckr}6E@qYNf`6&z`Ot;O zC6>Lip`fVdWc7n4Xlx1!zPQ$pZ~%_3O{f#B0utSUVW~%JIGyf#~oG^7Ol6!Ae?Hp-t4IM4Mv7n8bm) z&xh%JtKQ+q)V^U1%af=CjZ`J?_%82=dXqyS z4R(PtHK{>_KBXDRTYoBY8@wHZ-#KtT%<4iC_s=qh+Xo?795j;7vw7u8)NB&bo}Vkv zG}DToci)u*{w34VAs@*{cy}VJ;#xZ`GNV!YE&g$b-izM3)E3ADmD!?cJ~b%ALw%z6 zO|(Yffd8Q>-j0ON^@H~4CuY+`kaT^%$j>zy{d-0t1YM`Vt$#B#oat!guZ~25kI`QZ zeb0dwAueDOf(qJtvNpJzK;aLbAfAfYVTJ zCSkYOky3#^BH+|*Kk3Hy>C)6A5S!khfZP_%dFUYVQTJ!+Chud{qQy7p!` z?;cx|cp-J8>|jJXA=>!H*Tg_3N-|(j5nR4*i@+Uv2!EwxZ1U%1fBHTq(N~-me9zhu zp&Mj32|EerHu)sEdE&Ns&B^oa9rW}D=;+j_PzFv6X(v9&+afZl{PbdjU#)v1QM8(u^h?yxpw_|u>0Qn~3u+ZlIZavs;x`$?o-+r&eR6!fQU(0af18%;b zjqyeNw|}#6Pho4(*hR}AJA1=s)sWO-Y|th-qd$H0NEAvpCtqhCj4qAE@K!<3XSEj^ zBPCfs<;rH<(PcP3x%4mZJUvwjRC1}GO`dcF;V2vx@XEv6-+?KYe)aA$bnB*mH+_Y- z(ri)m48>2MuU*CNTrzrN=+o!&3Xf5m@XH1M_<#DH@r1=5?S$IJ^+2P8suaCd1(pl- z9Q^L3clQ=1{3O^r%1d<6NfXAbpWI&`$-MhrL#TY8+@o7FJwN*wrrqy}(bgCKD41o> zmI({rVH0sS$mJccoliza8PpPVF&WKDKJnvMO@2-%Oin0A^B+$le+ECbH+d9|td_K3 zD1Yn8&4vo!&V8H)XM%gR1xxO^U$6GG{N#9s(%HS)ziw!YLGk6ah0eyV5`&(hVj$dzu7?M zz_*1|1JSMB^4;~DDe41_rr>owd@Lwg>|&7bJRjD0EC9D|!`0YygzGKsi*ESc8ycySRPJce z>l2#Eik=pbU>~b15LWNh-(-lpV57Y}BEn=-#~*?xQmFm_3&?wgM5$PH-|{G_{ z8H*Ge1WAB3VNZ{Z0Lu}lOC_!Srhif4s?H}nwq%_(x$#zel3uN#8BxB1Vht%GCHc-N z5Dgeu`xU5V$GZ`2XKPm9%q;~}_KYyL+RF1}{InM4%DjDr8n)G5!EUmI*I2!!Y?dln zGm!{MX$o=M#fU{n+j*xCzpG8Q_Z&^Aja_vP!<$%pK|WqgQc4^-q((A%Fca%9Z8WEsI0H z&tZ8tb(~=oSWYIoL>g|8ld6wvMB%i+k@S8=u5A21nml;?pi1>A%Uo1sT|V=i{N%V; zQ~nTpbi>q&2rS0F6xql=u9gRjf_$R2n%ltNLtmkpZj5LyLgs68t0Nm+SIx{)G*sgv zpUs&MYM0LW^I2f~LYvc<*41Hw> zv%{t-86vKW=?xfxT5zEu>`6_8`m(U+F3;W}DfE_IeWnj)1}daYCP%PdBLu_(P4xNG z_iC&%&0Zh5qi<8!o`NtGt?wz70V3p#oCE=Tvux>CLv%k{bQn91kt)`>Wg`?Kq4%J zwrb0=D!Xs1Fp6`j+Kn_Bj@WmUp>j)noP8Za3o%0H@v)q&-NgxPd~Np)4$1?DAQbQ6 zM0i@^lz)c^iu8qIpzKW<>f%#q*(G@ff9H{;1pX zW}IFpOBVNOkNLzl-eB>`r1+K8AZXDuW;J<=aCD?56c49S1l@a8@oB3COxvO|Bd?%h z_W2{>_EdJ(ud0o@l-E+?Yby$L*bU(NG-!3ge}446(Gv}EN%>HR434I-;%JYJEq;yu$PcFzZuKnCU8wPjncvAV z0b4uV=(+QaJ&_$a@`k%*;Krs;cY%<^b88b2_4FFaf~wO;eh`?iG(57&j3-ynOMhO> zFbe(P&sd%zM3(|lmpg@L7Kt+}A*q4NuQs-0pPlHACn#4`}xDS)1a~&lISV-cyAYq z)q_{%i|ahggI+|dSIZQNptoh`_J8ygi+ks9j63#<>x)!#^DSh7*P6zmJS_qXg7_-3 zv4dxj1}DT=g85zj5z59r&g$dl2H#Tky0+N(M!UOUHAb+t(Kgr^?ft3VUbiPu&0V*V zqE@4>_5Dy!X0c9TmR@8RC3Qb2GL0#knLYpQFV}hvOR|vi=}L%}(Ry}3<9|@E9}-X` z1A^JZ0IqwMdCn{?)^Jf$pXnae!QNJ=dk991phUf;|2ddGY5~{pdM;-ZuBd;j%l8M+ z?PEPAwtpzc0jY*}AI;$t9arZ(#l4;gZiLEkIZ|tM0xE!i%ga-uO1lkv;7j@p|!>IaFf z>2iT5lsV|F^4HJ@|HJj)cjHM04*rdSrH`h!tW0X zDtU-Q&9eiZ_p}Eff=e~3nwybxtO5q3^oCTjsgV;Wd`Wy=_uW}u&3}GnLVK9{Yq6~N z>Ot04RC$gD#ZU{Xe<|nqj6UiXI@7(rRIyEa`%XFO`vZnMRS7SoMOD%2*s8j1TLR<- zKh4%sNvQaA;sbC9AAs@jP?bXdG_w&Z^X2NNNTAWI?BgeVaBt-$tL z5(-wvVIFQUq^wDtDJ^C%HT#k~m@u<0*3$*;S?~6aMb~+}{CZ#RA{~8BD@Gj&&kE1r zQMg*z6i!ho^(;>x{^H*ZgnyNpdHt7vzl+Nu%4s>o(kQ+IM}LF+zPc4XayLfpx2@F} z%@a(a2OG~F{d2PBtsMFvu>mJmO^)8P?~L(9B1^1n!QK5jL5r&IyB%hS^Fk6hPtffj z2$~G$p{2gR700K$@4Eh`maPv=ONJ_MIj*ahV!NtH{(4z@#XgcO!I}LA8t<6aHYqJH z(N_(Vjs3{6Xn#6xcmzm}@FiA+y&tZ0@%V|3)N7kX$oyBX@?S;Di~arIqM84yK*m7+R}%8aRoDNioXJ%iqU|YGwvlp zVB^^@&v zU7Zfcb@~jDFIU)RVABzY#}>-4E^pFKz|@5( zU-5Cbx9HeO>x`4wZt2BrnMmq6&peD|PKEE_gVfMJPz<)Zhq7mKMc(dYcebB^+vLa)|mX9iS)8!68t^FlymQLM>g=V)zeUUYN4nh6o 
z>$kaRO}RFtK~t^ZkM^M}jhHUp;|x9KALf-;y+fX#r*8Tszfo%)0Px^Hcrdv55vAy& z>8}ke09F5=RZL-sgPF={!9n~NH2Ht(n-W-q*?)k#f40L4FMWk}fmP1(rtPq1`-DyV zgjIVTwpq`9+His~xcDCB;FZi!;4!bF!sD(g#B115V?TiV5^JqTVpJaSF#)1-pz1qdkoOq~!0EBqdyNA{dD^E#vv* z;u)K>PJTX$!g8xev)-XhJSrcUzEV&M@si$7g9i2u{~ov%0xSIv6dbM;)~1?VHF> zI}*0FMDp>c7|$U(Xmq-rL7J9fEcmlD6oqKU zu>O3e$y-qATQLa3E>pg#(eG(#_j^NSHN1t7o}01nsR3(HalnQd;oQ0Dzt@8V7~`5C zeHq9i6zroPB88%o=dXg&^&9vZa~q3J6-z5V5=h;gV+mg+%%X!^x#~1A9oWiTYJdOm zAB-yQU)7XFrjJEtiT9bibk4pYJ{eyS>#cmtfEVj3#;ff*rA{NRUw5SOWpZFrE%{a9 z2an~t`EBLp5+XBskSV71NBA1c#K;2E#ekc0o30oF8$DeKY~hdC6cN88!Cda=ToB@* z91dV3Qf+1vQ=j9rqo*ky2y z*$vOTi>GlEQZZv5)~ccU#xeDen`sFz6J?O(!u<=<>vCvz@=TLGXr@W?iPvRe3-C$5Uy-h{sCk2PyjVC#v{#Csi zFmukf7arW%=VWZ=ET`7_cz;EVo!HvaUws50w!OI%BAeBD;g6Gjr~T(LY0yoecG7Hg zc{%bXbEu};V%zHNdM)h8K`3o_e$GG+*Dbwj&Oo~0D;)mzU0-wfdc%YfOE!(%Aft#`;afRTF_F2*T|W`G+E;K#vp0;8jr4M%`lj|zM#&{XX@b;3 zD$xKL>qSu42p`!tvwwrZDu6CE*D_U|{FS@=``b>*I{N0s-5K1rYxcEDx$!V5I3i^& zaHeEUj_5U@gk6b95aUA^2US*s{0XVcv%foZh=FJl{0W{7M8=6eVeD!j)RoNJGgWSH zx4O@M)VM}=KPk*nYXjF}4YDxYHsHc#10gbzK2px;Q_d2h+v)BRbUccdlo~5k7u&lJHIG)C|7sV z-&I%G^~L@%cYkpmwHWvg`9pm&;Cqt-1LZ5b;lumM@c5m>!8zx5QJ=}_@U`@wcr={F zX>J3%6y_4Rva(Oro7xoq`|*fqRwcwHp*8ok~%-@$y9davg)b86)-Vz`Bokor^C1-@YyJuMEc{MsvCXF0?W*pt< z3JKd>sDDeUpL#&nni5Yr!$L(cU zbY4C~uV@mZzsE`|qR~PXWc4xdvZfneHSOTLI=>p1SdR5;P}AwQpLbu-*bS<)M{h#t zK3SOh*zDi2>4~w=phzB$7SAE&o-j00?`U@+)_?kBp(4DGXSPpYmplX8r|Ligx1>4D z_UV4d`N$n+pCg5Q<|Ne%Q=dvq>o~4P>?@=aYO+1xgaE`d+%Y5MMb%+r@`4jaH5bV% zY7WKGMBKIcb>_e#XAhTPm`OvG#iWQu{Y3M3y%-y(hE>Erq#f?R#E|ig<>G^pZFE?tgIwYFR9#2}hYcM6I{uA*o2`81Q5?MeEMO zlkf_odfwr#+LMvsx}9M={S=p6_LCy&!|qcCb&MbF7=&Y7O-k5;LP%v_`}V$K-1dRB zQgFA1II$!;$1#>GabwA8{vL&Yj}L!+HDj_&;Pwq$&+j^}(A;eTi>INDznc4c-+!g4 zd(SbOP~ot}+a67byfss2UNfq1aFXnSL|NCPKhn5owkI5>h&F7@c^qaQfWo#GeZNgf z7*bgH&)K->KnL$^5chLa4p(eN;SVrlkT^M+l&ZEu#P`ZQ`n_ zJulq(USHH{wgyyG(j+kv<|)$TW9~dNhJ#y3@zmEb)1y@%3P1_dYzorXt~}cc(;SG@ zb?!@t1%sBvBi3h18W(0KehM}In!65-zaF%>@XagP$`WS{YJv<&Pm&}%+kb!Sr0BDT zD>s|b)YfGciIQE`Ex}K>WK!R`@sQnL1oaPbfb=&4&dSUd|lj8>c-T%!6k;upRNHPWv4o zn~99?-?fVp38iIhHf?1VZ0E|poQt)(qTIa-A~&8yi)zm^LPP-141WeLV6_x!NPY3s zDSGz-1Qie!+J|;AdY}_}rKqv^GDp9>IsqM@FgOIOpt}ur3R!kIVt+(G9kiWJfuyC$ zlE_!&T=@<=n$) z-p?D2%X+t^&4Zzxj&G2I&)Rw8-`_f^O;<$`)_Qk%KC%*+YG2*V2i?+NuFjfehZmu79)K3FGU zd#uEm;6uL}eTO79GheFHtzW3Z-lRX_V8;jr@Z_o(Svn)5+XX#$Ty4cSAy6ATcE4rB zBlF|9Bq{B}%aWbHe4uSqqiZV_rHp=hlYymwY9(=JH`s;dK@>=t+-gxy)U1-#Vo+(z)`B)Pk57 zW==wKPDyab%_+`>c{0srW$P(pBKXd#L;Z}C1?R3(7Q-VfX8R5-O-Yy(JRs&|yu2{E zpU(tkU6vM-=q^8MvrJXGkr27bn_4$|=MTC1yx6Gs9W>AuXWCf*P-U%THv zqY(XQ>B~u2L(lk0HPxqu387a?Sd&i(BC7as4oaq{-?4r?g_lig{VNV8j$#tP2)901 zw@0N?|iBU5PKF#N!>;6OjT zsdKYz5~J;Iq}o;aEdz1eJ!hX~*Y4)j=}FTL1SRx?zza|=*tWe3LkxRyoA`V(EtAG` zFTS9zZ}dhvNg5;0RnuRQ7cohqxLp{0-=pY_dQpT&oTp`Fk>}qp_r4Ft7?xF9Ir~6g zdY`iFgMYtxm6v6yB?9{W(qF`~VEn=0uS^-@GxHE!RJ!On+avMa8#H=A@=UpkL#va<-AX8QTxqet4vp#--MxQt4iaV!7-NsIM3O#fj8EK@1~DEbYpOqJZ{{) zuEQ}h*{H&rsG>-Q^76RqhK}m0p6arm;Z?7cR)5v50z!Xsxc#G|CUtV+DsS3&CsQ70 ziunxaTIP6vbTB*sB@6>V|G#)@{(op{_Wl2`)ciNAWByCdTukMm`l)x})Jzos;{A`> zCH}`c!~A2J0f5;K@Q-z-Ek?Tn{gfCL0HyYqg=5lWWX3F%4>-C25b;0i6_fiPmYIJk zynnv|u)j||W>P<-(zSg5JdOExewn9YP$!}>*n0k!SiRX8y^aIH&PC>mTe-De z%>~vRRwS723yGj+t!+7#|)_Ae+kJPrXlcN>|QZDkbJ;u!IO1=Yp~?icx_yNbIfq{ zMtT08E64O#$Bp;j^GO28O;F{M{^t1q!REFl=FVTCwk7$>MZx>qpN9g-MSrHzTjnAJ z=E_C+kq5)Gaf8bt(B`;&(tp2S!dA240iI=SL}x0soT}JjZIhSz_yG04ZH)yw3&=D8 z_T;l2sso$nnOYVZAeMaN|0E~28QT}xtKvZQf3N-jI2QI5FCBj}d{qDE;uHyffMNY* zl}($mfyjdb0K*3J6{EG|a!3Mf`%y0*(_c9h?)cT)9Q#lI`!7z{AAkP;3y11Yz`yNL 
zx&Iyi4;D}SpW|&D8EiiM+h66P;P}39$GG0c|B+kq|B+k$SH>TPZH@`A-O7J9@EXx)VhKQ4f*&(n*IB@pHP+Ty<|Ju&(%HXpZBWGm@N>qlc%FAQneV=4bjK{HX5L_c z=n{eF09pXoG|GY?NLAHwRmU(@u2}eG!)GrU-J&aZFG;Kb3V%0$%b;Qy_o}702eM1( zOch%WPyG2u7=|WJt~C9RI;{EgL$UBR8vUOi5!viKj2V~P3S{pG*7%KxypL;aQUe>!;f zs5J3^FuvmV-+%dy{}`?Rr+NfcuKY2VAm!iOOU1 zLWSmDCu6_nvUuRTpvo`p%Kz-O=A8_}ohlO;;wMx*%YOrY6Iq#6U42*TZjjo1WG_DC z>b#b#O^Xqqa}Yn{r>8R>dN&zjvENvi?>uJFpTb@Wqc`y-IJ_@aKFi!&tJOs7*jvWh zfBk71#3!a%Th9EKuUtO&v;^Fmdv)xMbn+vIqNCfoL;F%43&56n(8vJTTmYhq`Pu)8 zN6e}_jDOAl+4O(><9~1o{)6|Qt>*u=(;PGGKm7PjxVKHZ^8(Z|DKmu1+Oh69h|U0A z2Q88aHL=O}qzKh+IZx#=6{3L4S51CPS(|5>zt;hqwc#sf8CyI4tH<9AoJYO$;s3_8 z{4XAa|0~`F0ux7n{YHlQL%-#x?s}dBl?3htb$<}r1@%=ij7Mp6pL%`!E4=R~DH6FyR5dq(uG+ZH&Mc==Wp9e$+#*Zo2 z*ndEg|NO6C0dadL7kKi@zyEA7)bBVF{SFq99NorP@e1*Z@@5s7u04GnQIc5t1^IHu|g#$9Jqc` zOLI&Wt2`4ovdcI6tMiP_XIxc@KF|%e@_!|c;aT<~E-)e@b5AU&{Lu8{Sn_P1v6YFt zHwL)fBD>vJsB)PBJS%{}P*~p=*p2}=(1dEUc?MXZrhg3Z4eri)ubVzXmFt{6F+Ffx za9KF&`f3Z~me|Y=4uGE!g-5=xyQE^VZtm==X z6FSiO0K%O{?S!bPaoT}je?}#MIM;gjK6~sl&V^&GsR*XKtLmwzo~l-w~`v(E5+h=-v{Zgg(9~ zH10lP+i}cCtEigHMA|+DnRyE;D`B8P&Eu~j?4rJnEbByhvH-O%c@s++tT^* z+udZ^`@{a+pJK!jbAJTiYhE7hd%&mtm)^^2z?k%m;qI_c^!h6I)lR<*xTiSckCz^2 z*#76a+~~vpmi_C#KeGs~u(7ObZ&WgQ3BAP%ZR<_1+=t}w%%G^^rhF1EFQ>K@XxbI6 zh0k@Xlk!ES?TSMaGnh;f)Ixyu<#M6&J$!H|-a=^gRKij~Ie(3-F`~;EBuX;p7~hZo zzyfOD+Lc=98)e4X%G(zHetSLLPu9E^J7$IERw!KyjV9-Ok-I2?MPPHzz^geZpCPKJ zv~_b{#X)Dc+UanynSA(mGKDKEn3>F&F`Vy;lskK!IYZ~njxM%L+vpBtAHOfA(BlX% z-sH62hqXN5HGjf)T=)1b+?~a$Q(kub-U(v{c<|Lj{OWPYURNdgp_QyoxktYzA)VQs zLJ1{u^j=YV1vJflAaunmo%+^NE-RUtT&iws!FJr_#bx5girE{C+<3HE*nn@=oH=o; zT>j*Pkkh)joYs)@wDFZGQm|t*pi3}@ff1_)>>@R7VSmth0jijrcFNLXrZ#00RN$K| zcbaVrS(R1fZI@U-r_1X}2wC}sRkG~=7H2k5y`{1GciN2UeTmKaGun^wn%0-@k^YE% zIpCwKcyEqp_ca{+C#^$vTGJ{Y>|j6Gz+c#$_9E}?x*D()y_Rz7!8WG1%I3vvXK!Sp z1Lz?2-G3@w+Hu1xh1h*A->Y&K?Z+u}dBP~kbY3Q_0$JiwJF#LFY*i%;w@tt+^YFHA zCD>-gJK#_ioFOE5-{3uKGj4>0bggfrZQ+|zW}aCynpOwZ{n7}=jONTbi7uZ*ZmxZ6 zXMH^R8ok4&WUmG_#e>Kd6b~{wOM>#luGk&;gnz+zAM?jrA9E1;+3FD2>f<^5&R_K6 z?2Ps2_^{@_nwVf=gb+Lx2Alv}M8U#qCAfRvl>(fOFe$EQn+L{h6tApaVD{{rPA^qg za%A2!+Ht3<@lsUneX=M2=}<#1!O0w4qx@*43$u>$vJe$w{%{Cmx8kZ3D78a4Mb_%q+-3FGD>O^XHE@r}+d z*{s$dV7KY?H4IYImYAwabgB?l{uWeTz<-&(mEab1Yi^LLm9S1hHF+1tNU+|kewpmi z*l|+3cONhLa%a1+zNrFCLKETB%vxkDZz9yGZ1tnE%1`!feOEO$!luml&Ee+cDm!-3@sO&4)eE;TJ z#vz|e{48cYt`iD_d^|J4hh5HyNQJ~TA(U)obpTc&S}pijcClqD7PW~)yG85>2Wlur zqYtPX_F~}$_!cq?(Wlqk>Rp)vXn$IlBkm$UJ>(guGjq-jlrIBbDz4+Itnq+1B-vi~ z9cy98{lEQrUA`W2f86p*eTQ2OX{kah7{7EQm6~y8#sO#RKiqf@Y;NxOMDi?Npv(WT zr0~&bU++X7&3`||b@OZK95N`3sSBEarFMt*#@l*+6m!8MZe&!1W|9|=K`!|`m&mhSk zrZ3w?z}%xuyidrAQli)TUH&kg|Bn2we2A)c8Kf@yDx(!FC%{Ry4fUn8lL##NcgS%k>@inUsSq(QD}@7Ru2R7n zj4rO9em^2O4Nhzzo!>sX2>?JqzrQLcHVEw$=f59DpB}EOA_SYq911p0`v1S9RDkj( zM|oXcvb{kcq#~Ywo~Cw6d6PKi(*s{|CI80hRh}Qud+$fTxy8!^e}8}V%>^F)FJV8z zfPd9({nOWjC zayNcNliwjT-H%$&-DDxanEBE^24EO8`PfH0ir^VFl&b*CB~Lsbt&T)B*+$7^G6j(a zX6$?zzlI6u7$FTCrNMs|rC-l4D)ip`uDF|4ZNH8$^U)=AIdcXr1SPHGXR&GCXdOdS z{Cke+ik>t5=Hbf$)7bJn&lz&{{fRT$$?h*qv@x6bh_g=|;DqUYpZ5l^D^6UyOw8)! 
zSBAfZ-5Eg#IabMbjYcj~ghXfS=0)VcgS31IE-xQg)hwA6$jpD@D#qt#_m^IVT(G53^gpD@%`dqOTDz?473ac>ZAQ#Ey zWGy!{rW*aqtU-SS{k8OxKOZ}$n)q&{SJ0j9j2inl21%0QsGSuLCttnBxZO0DqG zusz@uWI_9wXI+6b!-m zJh@G!7^eah;RlXTN&{M6)=(qcYT`}6-G6|w3QU|VKE>3$JxYirBAMl7GtZ^CE znra`Ca7Vw^wH~lcXZD2gXm4<^fqcNYWJmP<+zUTjfgg8Q+;4KQ#k}C{lp#y=i{iVSe;scZQX7V;8{w2I0{c^g6R+CWQZ}-K+z@wT|&Ty=~9kAPw zKiGfseCxbMO@65NKjcFw|6E8=gU=^FwaKrb6~E_K4-YyO@63u%-MpEnuHk4W3$P;a z$6@BD>K>b$Hwg_BOV|Yd!(K3@lAYnuILSRl&r7vDh7f$Fm!E6xy=~UcE|(jzs}|&r z3+c<5l%_y-6~+-O7QTA(qGY|~AZ9aI$KZdAqo^#egR4G6yRh5rWJp% z9kUmpx_&L!tN70yZ?+r+q7|HXJwEiAF>DHSE|ibv>Gs{`N@S5=2(RJ!B4sd~r%_q< z{4v#g;ZMol0pBSvmji#iKb(QV?)F7`;A~!pH7O`hI@AL70WQ$e#d`>e)nGK`VN^R}=r#q_ArSJpV#-CPBrYg`5!I?M8HYlkQ&@j7)y6hQ za3)u%67Dt^x7H(wqV+DZzUj}ufQt!o;;Cp~3?O{Ar+raFec@U4HP%B8(Z_VP(?jlY zB{qb+O7e}gmX)l`p?-AWoU#Fn^zTtCYQpRHLmz5Xb4%x$c-Q<;U!lDn@;lYXuK`OO zYy}rpe6LbWxEnLDdZ?q>awC6TzENHwSX!2JHo=uRapeOTwcxHqF!@Jp)h`K?N?H+v z(^hf#C9u3>gt1MpxtQaA!f0R7wJX7`Qw(C2s*JgQooOFVev|4uKIPFA*Amd^N_9oP z%22&`DM+1YzBBW+}Gi5=Ik1C<9DI<-5Xvx^^>gnxTJf7ifCEVM+D) zDNH%YhWLiOuYbC{{-5~E_#eKQuwZdkL)V+5?dgHfI`ZHY&yKm}D_TFg8~DyzKq&H}0wrd`4eO)qCBqXPowH-`k2N z+`U)y-{rue_OE9$F^|yO(IFNNoCvM==7%5b@rT|F`!;5}D*k`%cW%*@BB;#@O*92l zTnl@^Yk7h?YZN0~=Zr5lg+|*H*nvP5vAGd`+$pL(3bdEPWa5UwcG#E1Yos&0c3Prs z6}R~lSc)Ttvm{yi3vbP6KXKehm!la+Or`YmGGfM)fjjt1*W`=p`N21(=xaHDV-7co z(JI1#hMOXQ?xuh88<=x9-HLyEjV0V~<>mb1 zaDhFh>HVE18++<;<899m+wD*acc<@1E~doZQnnCJjksosTw}GUvTXYj9YzJYRn$Z%+ZV{a%1I4`J0%+-9Y5 z^t=i5eye|G<{RY`E@ozadkH20s{;ftAAa8304F~+#A>RW{T}^t;TOEOFelu$)6oI9 z9k{t6=Jt6UJ>vn_l3(d@#y5UD>VJ+p1OL3HCagcnI}`T!19B$P%O_vtGv;!_|BNMS zi#GlRU0i?7U763C@9T#?3^xeRyztxa=% z#zmafMi1xbz=t1r4f3n98#9^ob=Bd0c0b}Q{qBEd3`6~qa7l0f-!;lj_%U8(9o4P> zBeoL=1zkkj&8!Wv&)4#Y5DN*O!|k=ZL{L;WL~x!-vD{0TknSYM&G-tf!9q^B+K=yf z+Gaz&zR*E#(+u_SxsXq%FPhKxW1N~?vm}=R48Df%$To}rfD_9(H^t#>lD*~%=fCIm zOY;GRmjj9lDt|JiSPEwF|6oskpFNP@=i~pO-&a=y*U@tzhZ?e4^!3U^O;~@|B7L?0 z#*csP$4p(pdWCs{R)OJ1>AL24PZh9>WTs@U+-(pTh*q7XKW+FRCU~ zrr@lapF!yS?YDRBqu8rJO&uRk(Hd_xXwIs1i zva?SN1aqL`&9YMI%j26u7^OjXd!2cnmw(@G(A+#FFEfzU<+Okn!H&nt_2MD3_Q*f5 zD>L}HCG|Pv9ZJ{NEB61ymnVL!*Ha$qli%Ffz$4~?RXe#+`@>l|XfEja#h|S?a$>H` zf2;v3U*_L9k=5=DbAI?8yDVyvn02xD)~n0nF(A8j+mM@o;!Su2-r`#3C2u(|@#k8qV15qCa~}i)?-S!kiEibI$6vmX^SB!&rT@ zLU@~k%lS*@wWNIj`?k{PA`~!2+hy6py>K?3Blr(~HCczLs$wYTbFZ2_J$Sb0+y`v?be81kbE3!*{(q5i0+TVHCN`7-}J|4N>^P1;Io!2Hch#H>F9cP}j21l5T3%&+Lc zPac)^)FBt9cqE{9g3kDfZznFp-pq9^Y~M-*xVCWz3F+?0wXHi?-m1RNDoFh-!uU1>SSgWBBbJ{6P;{ zy!JJ|o!H4PUrFrs1bMHAJ^qZH=^g(wt~=DFhWH`6o}c*Umw$dM2@Uyg|Lf5S3<-nQ zK)Ie^i7vz@|0IneY#m3h*dB6vCT)&6v;HnIkq5PL~n->!oI z>3mt?UqN3(;$TcD`F7%i_4^)%uj>>+k$&_$KXB%L(Bl;vk`je0In?fMRc*zftDla3 z*#U)d8n}te8O4o7@YFydaT+zJ_Qq(9$m?_r5Y9QI3Js)!A_m=uf%$3m{0S`ccBGgHWy zOXH&nU~PW-1j4=v#Ar*dqh@!XIj$1p&Gu%$0E3Tq{C~)7u7BolE)wu@QMm_*Ypo;e zf?-{+XmCxWGeOVg1ZO#HMsHpq6C+?#Ak=mydv3qyff=2%=|1@q$F;T{ zz)Q(T)qhyPMyvTkJ-6^miEcXvK^;dvJXY%ZdB%^H315pO4w!A9YLF<_Gi{kiKk%8Im9tdC9q4TmKY zlaih<-j`H=qkQ+T&(j0Wv)Yec8!{tyNmJ~>h<~+j++l4!o9sewtGW17nNPvo)9etx-%h;5?3KDA(XDfjK=6Rdgt?p7lI(&(D|O`n;Na z!C@_r9DUj6TW7VB6SqdbRCh<6koG0@3v7zN&OqVKZO<3~d}N-+_z5Ss8Le8yH!|k19aL)V|?zpJ%7ABZxHmKsnOxII@&_}KC|sRvu-1tikP9g zXB**8GXs$iuFSbuiT8CMAz>`Q=smoCdK?~=1sxI(B8=x#?hU3%Ir%=@1b^`H z=ZShfn4`VdF}Q|$PX^rY%0BP+ZF}?Vmi5+{h;n(J^t>|FjkuuxO5z($fj36%^-IIi zl?B+Xapeg*u@85ZR)5njWIMob2R0Fm-?(50c4z_*3_Ufb`IO;lp>O9|m;AF?# zso9}NWB-Fo+^Jw<`n>B|x?FKvtbb#kGseb`;3)2z*>qEN!qC3wnQynz!cId3F;&DD zTS5IgvBBcYN0?InBr^9Zg3BA2#fMYi9Y3T4ln%xdB&jgmn&& z217UXStm60!LcvbAy6%p`jZWV@dSH2qxy+a20koF8~wJkvjI<-gO;RxJ#Y{{>0!@w z#Y@Tup@MrfMnGC@=4Nb}u|8Tgr@0I-#21-xNt|;;wGI$&nog#d{FMqA8&OgVhTl>T 
zd8R&O-_xuJXD?5ha9WEO!>(V~moJtIAAc9{0y5IM=4L$Yn&Z5R98P+>laAphKeH#aZX5`&fyf|%nX07tzmE)%Yxnw}I!NkmJtBAjo=#Gz1r zmWsOBu}WrfZObsRp#Z&}b?_Q=lVy;IOu9A^oq6z`7s#r?w+fb{KjoZZ@c`DuQh&64 z-3l>W23=VR(;ynPM|)#t$@s%uP3}ae0%^8pp1MkUp~$gS(@upPMI)+sy@nzlbk+OO ztqC&tC?Ev*Mfz7=7dcRWLr1=Juo>*k`Fw(Jonxm5K49P#vIF0L{X=u=iaFT3N5~(1 z&eR6$@yE<7x-e_H=_Zlzvh`W#Eq{70G;w2Y%C`W`N?0`wT{Dl;(icg~-d*l9^G8)q z`RekMFQmE%lbc0?TLbq%BqyRZi5Ok8+^V^*M({stenI7w`SO27oIcje4wX$oOAXk;0L#6{!n}4Tx*F+2r z12?`}V)NufGd3ol7oMqoc)J%?RemK_%F2;&+LzDz*V5W7O$6hK=_nwhnT&+X8M|CF zzpJK?Ni(DO2+|@GX>Nx7T#asxx~Ba!fW4kMYt*|K`c;T8qrdxn&*&^)>&oy$U%@dR z?0?qL%a=IS{=cn({7?3Ah=0$p5DzL*OjCXJxX;F7(vDN$s=0pNmzwwyq*ug>(5Vte=M}XiFeCybSj3qEPv;`W3NXoey-uS zmU}n&qLb!wbI<~R^Osy-?Gv~}m@L>rT{3HPuzEIM7NMVwU$wQfWdk;_LW9$Rr z%|GuZPky3O(~Y&i*gqK-&INJ1jUr)<KxI)6Jw(x>A`9Qy9ir+DI9dws{ig}8Eq-*{p3Q~zHc_{E;9qWMAI zSsif~XNPmM|Es^z%BQf>lp_jWwVrl;jZ4Ox{62(np>Xnw32de{lp-mfh0SN&l@Yf( z2ct_rbuIS2Kjr7t3er2QWanKoTW&NH_aZ4+f#Vf+WoU|asDEKugZ1z2xY&p<4(df= z`yvzdB(0LNgV)sF-ZS_!t3VNduBpcit0H`2)*D{$Jxj7mOMQqMe_M)E4)Q)uwHNWu zP@GyF`|F64J=o0!Z1T-nJ%u!115ZwPm^kmDKXlpa$%b`4aZf*d@W2%w&s2afTjQhf z!P@7b66-0n$bTK#nPyQ2DcemvxV!{~4dXU<(v?U_0qgXynWfs&o1Ja*=k+GnENz}i zkFHH=B4c-!3hz#hIr5!m`VP72Ap^0 zRyS&`CSeTDyX02z4s<9pjSMHRcV2$z^B-M8t!OS%-+u=mIPa)SsZaeR_SEy54LZr; zjP-Uo^lbl(XM@i_#r1<<4}NmrRwbg+cfx7H#tHF4s%e#AMJpK1v5^lm5aA**_U>hD(Tx*s!Ug5+ecqO6 ziPiMt#(yb4+Lv9x#=rA)oNzFyzz#X&3{mcUP;FS{?@yZ|?Bjv>8r@pCgf~hAP*OaY zbraS1w{+V3X=8o8?!>cw+vTC2YKl`|)Tw^;H=a7;r|kY+b!8%V`BEfBjIPZ@Mg3C- zMFkR!DPqiXQJW9q7M4c%0(ItBnxkobL3PbEoPRQemnmpmnu0C5ypa?mt9zeAgQ9X1 z0UFWdvqhTEU^jjdF6XyKlh`8(1ob>AAy%q@dc5Ig%x{fY(7Mp{{}^O@SQkO-qAESC zOW%KezApN-|A*er1-dai>(x8=Z)d#bq3-;hH*LUdE7^&4*qKz`H@@Iu%&fhTw(MBv zVt+_5*VZo7u5Dx|R)Hy`-<3}Sk_{NW3Tu&b;@Bx~f3|suw8mmN`UK(@>>9=_PQ=V_ zi}(AzVOo?C)Z)pnbw=4KRQbI&4r{la@f7~}bvf&MrH^*?)Z5jT>?(1(&s&c8K>nLw zKInMDU2)!%bgGXM9?*L=^dSwHVy_8L{eO)k2R}u2TA%koiv5{>_HB%bSLn6TYmwT{ zuXPpR#0Ckh{LX}E^QEkc^)&HHbh%#w<+u~7rw7>g>QkOrQqN%?q;@V?v&a2da+3>4 z?1Gu04;$9{45v_DTA0yG%cb?&I1#I`n$%IJjoi^WE>*9~%tYlI$_K?=<|gx?D}Pmh z;#F4LMUT}%Cps4?ZtMq38#S$Rh9EZ!Yy)127@qTQ{7Umz-o38`Ww@d-3~Y;wo{0}O0!o}R?$3x@dO&;$MTu1+G3iTh$YAG zPe|JyvqbcJOZsr?|KaF)Xl|GQGo6J{f@#_`f#}XQTHrVVD@>et9E}_NTYq9Ze73 zd#6Cp<`B-ID-!NnbGGhAB;_jDOh`W($DZBKjWaoGTRQ(>fKfkU{vZhRP+#Y1uELMFxjO6pzdZF)9oPKxS!H>W+2^s5 z%|n7oaTBKIG1%LCa&Upw5!CuZbe78II9psst7w~}>>|IbTDRNoWtyy4nfl|SLEN|P z?0KttoB;QuIrW>}Z+{^*_(<_^(X?+k@BrWOk8~OPpZj^nEA@R{r##@Oe>via;knYG ze?iyZI74lQobeyLK|tp%ANIIV)#MYMy^c4ZzRnhfOq!J8cnMX$P_`7l>Gf(pt5Au? zWQW?GFPW0?T5@UHsYr72T`@xVoAbPkE1@MS#&%`Mcg-u>rhkT{IPE*sV$FDRZ!VK` znL+?|dxtV60UY+a0n(`rH9q2{rU=0r?IcoWyvUtmc=xc-yRytg^D1rGVgkQd8b$s2 zpm|kKmd?i|2%49);PDRVPWlh|qwkk$`;-&todorBjZYf)AG+ASM-N?*o$0R@XPm~s zU1eD_%L~C5_J4PT?pi?;iCS^Cbu~J(9%|1iO(fbmDM6Pjie`|9^^6_QmBl7xiEHuQ)-gG|gCJ zd7g1>D^Znivz@vVSXy+GCuWR`jIEV52cKjVCgu9*lc1lQ;9i!-! z_L*h+X}VCK^J=^bjPw;M$*lJwNXG#-V6Zi+_FhuVKjtuA1-o(~Qq%izmb994SW7#Z zQ~gqU;HLZWXOy&q9;U}Z@)BMZBz-(`FvO!?Pk(=y!*kML1l*d%H;;9QFUS1kXD)Zf zI~2o=x=lYT^`0jRMQ3oDj{RP9c_kX8nR6~jZk+MiI3dkvAH!Wc3wF&@P}_Z&YMiFt z(2rnL9PTEWfJSi}?u2nF6U5PzuW&(=6fPu8V2VruX++Ckk!uCb;P8{C^Aq7kaFuQP zS$~gaP8tSSAoCr~?m5P3HG%tP(6M;M`iCBap6iE?Jw0o`=YPfF%b4`b8=6t72A$^j z4AJZJ_&m%vBzqC@Y%_@k<}2RinV--sGm(;J7*W3kJN-VjBB2HooqCnXx14VrOP%0? 
z<|#Wb!Aejb9QYX+S(htrG_qYA%{R80W`FswdFrx&I~orp(cH+en|wq`@mbW~gY@!N zx`IPm27c$M(Pt6xPCbArXW`Bn;tdfW8sAy&!z5xmd+;9x{s#Xy@i6$IHQ3|4tsnU0 znA2Uyo1gjHp*B6>?c_{U+{*P}oUIGh(h_>V&ze^KW+>_45M6CZHSo0LcG(*gULZ%+8c zrT6Q@vVMK?(y#TUs0yblO?Bz#nSU4hIw|Hd(Q83rB2lKF5#xD*09!{!B%!g~n8pvN zm%|D^vpNpCF%vs6rFb1w+e*O}1MtvR<~i!T=f#zGnI^)QZmcoONAu0h|4-|*5C2W+mGHsd8n?^&_W+ps&Vv4LMyhXSfY$qDhJQVjE8zkg#(kBg5w z1B&PW7Ng3a`Oi7ea1Mt%71MyX3TuX+4gpd zx|}1>dPv^nufeW;ll5giO}tf-YWotJso!~Q^jyu>yRt&lJVuzO5os`Y`I|{-CWz*8 zxXs{-aD+!VLKd`M4u(0F2Og`(4~O+BenVA!_uCIMV$Nq0{N!wUuI8Ip?(^{>ZyWk! z?8&F}eq>j*PYmqRuYVfD3|&-E5I;N>Pz1t_Kkhee!=O#W+ALMpMBs`!v!xV?cO4|2 zW~QC{ok_Tv(116=I`DD#&K}sLl9heNUY|B8paXpCG{eLz`#JF`@mq!9-6WeW-LH^p z8pg#_q6?(aeYbl{e$1^ zh8g2>r(O@URDTY0w1kK%?;X?TLE^rSY;Jwn3W@C}JU8oT=N_VJdpAwC4O+F9$>aq< z;f(rGtYUjQdFUwLEd=#jYz}zsJYO8_b$N&{0}k1g*fR5nCG7|+mHF>^@A0pI0M;dAa}XRL2qGvLMjXl7)8FMmk*Q*S`$dwH6_Uh7R{)fZ5l z+n?sh5I*kb(p_2;`Ze_0omtnkwxV%G`JtbbgvH{QpSEAZP#;?!2pG?%sa{6CZb~Dw zj<~>(r|J=5%XEK_1=ZJqe^TFxn!u|8^|^k}ecSh)REJ(=KRoFYhrXt`)0=}%q#EWn z9p)09@_(B0Wzc<|YH|Bh9jWij>$Owtps{%Q<`(oJ#+=oQj=G1$_`Sb8`OrS*kD#CZ zSf0(y8FS#~4-99T$xL6zG&d)ktmei`sQ0wjo4I^8uSL2E?Lz>TNCQD+<$}0WRjcQDVlru|e}{0e4Y(^uV1H?5L4kz9c>^yBC$-9~S=4zMn)b)M zi0jxLba~?_g4_n%W-VwPqhz7&Z`M}A7W^_NT#tf~R#_7X@ zZ-0Nzm^k!q&z^$kw>z6Fn)!{8rwnueCj6v+Q_>!f{)`cdlKO1KXd-xIrk}tjU@X=9 z->*|ok91ll)MpbPd$EqSivthSbH!C&|Cklq`}K0rB^~NGqytj+)YE;)4TiaTU=Oq8 zH|KM;=qx@Z^J4v&3Qzgo_=*`J3=}@=K!2t*vv{8)d$0b?a-7X<(S)|wUh%{PdMdVp zLzpC+aihQ?Z$6BuYWF= zJ68eOJp`lQUlR`+Mf1hJ2uH80UN^EDYT83>WYDso*RzZBy3u#7u`5=ARg3RgZKocm zANqkqPt)d=~H0m<4m=d(+pw`*|OKrW7M8;i}hi0Y)b&6Fyoa zgG9_ZtUu;;dFgk>J)))jeZkE3Nq^crH3#hijM#qrn<>v*ya7as|D4uK7UD{fgGfP zwSnmK3YS?^>?FYf6GG)Us9oA_H;3a+GEPVXFmrOXb!5mn6b&08%=uf?|*wvdToO~_BWjF zsyO6C{XQJ-X7CR&5zl6j54^?xFpIA{-{bP?nN{6_iTiK|VQ%T(fo`Gt-bQy3EzCH= ziGn0w$Rzh!McP`6ab+7H7ev0@y>#!4fBJf=r^`f%z zO;9y!e5R)9;0uijMqd*ef5-Vw8{d>+J*N5vX)T{u0nK={hW~bA4m}oXB24_Kc-`L_ zuKM|e%nEVn50F<@NwhkfR_$DNV{@2i#vk|IgnCB8J%7ybYFq~;VX?r?q#;ZvH`P~w?Q57!?q#M@$%_ZyI0lKe0c>Ts8E3q@ z)JxkkDQ|eNyHpS8V#;%K>KU!P*Iu)i7uI)rI^F5dnbUD%wo9;vbJSOKcL&c)fmM-C zjV#06#6?%1t57(t4dKy{M`z7vFXIU~3XLZ1#XX>9i zn4kQK+@IOR_w4tB|A`Z4vqwVfd5ds-y@5r5fdx<80&c>-RD7tmjtG(}8RK=d#OxB+Eh zV_0Sm_rlozUYKl{J9p~4AM(I2V6V@~jCxae;>*{5?x|*3ueYVSdxEr+Gv3b;lLknA zYNH##eBj%M{*x2$%&j`w$O);nFCvq>GRY#vI-h=O((5FeYB;7q6+Ox_=D0 zZP*Jay7!8z@jJNB*&jo#ls|ax?|G8;(D$$_P$iR8m{yvq&qu|Y?z7vrX@DYZfO-vR z-bMsyivFAk>ZRRy?DNpqfp*xJqhC7Azd4&9cbakSLt(tR-E!6`K4rPf%tWBwW>Dcf z*f@xH)Z4R_(v~mN#G(k= zjxD+$^O5n2_?h?r6+grB|1Cc=FWJmwGH zCxT%|dU3VohU}!oTNBb8Vvs;8!TK_&@(@6s`ltG}`o2H(YM2?K|MSnR?|&;}#Efqd z?ZiYolQ3fu-c5yI#xpB?-oZzRBBc2y@X?6CGyxN#Kw|@Ri)fBCZ$|v5?|;t^Z3WG# zg#Q=wLuq!&k9lr~9@~B&_W#8k&_Db3I0qD+mVOFp#=Qbuqwhh9$j&;sXI^U-L6BbxaD`se%{)(rEPJ+W7v)qjV3AkOFA{)nYy z13xed7T;LYmfmp3q#k;^`~4luxl_mbKi%Q9Kj?Sq-!n85ns5CAmD7Q_DejW(=JiE= zO)tGssuqvgMW+&ZyUtS3F&CL!gnGp+Y24Y}vgvz?W#4zK`Op)r=`I2WFVAm%|7lmw zsb_7N8KXFN;}BY4ynnddrsD1OJq2@`ixOAK-PK$D%9}2^^b9s#+3t54*zJ_e0d2#L zbxXQ)XyEs0_R4?!8F{rE;=U_0)gb$OyOziKW&NzE?|$#TcT`mBjGIx5L%m~&v6Y!Y zt-kN_I`s+b-EbdAex7qKPc`yUc@b^K5z>qvx{Jn|ChT(Dl7BJUTnbbIx&j%DOG((8 zzMK}|E`7gyn;z!2Qs4MBb2b1QUftW4E2AR93PZnlZ(o@(0{w2nAoVaJUX4C;ezhe0 zg3vGyN)Zn^^z9d%C&CwxUmxIta-S4*zfRAM%D(SjLrQ+*OR@h$+nY7J$}?+%f2E_Y zs>0m?#@NQ^M1MuJB*Z+AhMNw+HnXw8HbY1M_YUE;_q)II4RtCiYTYbkrG%79Nr{yC zO0S541AG*V}Lw*fU^vtjqjk)ed2crB3B?-YM{^^qSN4s_&e)4TzMG^ z1@Hw(u_l~2Vj{&8L$lR3hF}E}EXPq--#Q5+LvW8H5#_`mHhK|SK(Xk)u4m+2yc7Y6 zO=Qbhn16NsdtUC-I45>x87!{^Cr!K%tOuOZ9Wfe65pBq2iU6J zUwA|HMFbIn{$T$F{a-j)f8rRgxS~X7{R4xVdJ}Aie~z;XZJYrohim-Gtj2#&OU`+S 
zGOnA%Ki($(@e3`ldXfEuC&TL`{>Tr8=pS?LYtL25g&*Qyy84v;fr0)rZ|vW3o(tJs z<9{Wcqo=v+Bw;vbM9|>hQayN^CEaF0bKoYN= zIv%OdphAEL;9(f#nBc9F^+<%Tj>L-Q@f`VvQ^zab*zI^F{zj0!YQVN7-l6ZoYWpNh z`l2j6!Q#y`qT}9(KC>nJISt6U`)a?Uet*A*ir;eegBS6$7I?pW3I4F_BWJqp-^SR^ z{=lAC&j|biQs&4-eCDknJcJLn z9NDGZIaT+?Oc-THz)b_~;icOn=JVAbro|6aC8dlI_6S}B`G(b1zog@^u^k~Nv0`&xb%g^O@g~YM9s+WPGZZSnX{p!9|6o8H zV9dW*4?qWZ^3C?j3GT{+#GB8_x%qC|U&0q8{*~n3_z%n#<~NSwuNc(7*8Z>E%K48o znXkuE`=O`8+dZT6nKE;W-Xg{^$B%{0DlNz-=$(^bfYs1+BKQB(5*P z`FGgGUGolgL}C{f91q7!rcC3a1FY#4fS!#-p@|9!C?i_^JP2O>iUINyw|^58-5Gnp zh6p}*abiQk%_|3v3doHBnflfTb7sXN6wUcxjkWV!aYyfX5M5svT>FMb z62m@^@dQjf8*&axANtleCc%I76a1TVz7_BDkGq@SchRN`S9g8=Bu~)|+8=lYkqpM^a&^de zU!!2eZA6&6ayiJz@qbkkL7pEkBFGKjCwETpqUnzRw3pH1`FhCgoM4_K!8bfeFj9(e zAm8}poEDCC8i?4_P|Obb2ti=z{M6a7frU^tisIS?pUcvm)!#>e3b8lvWuBgKFz-{h z+A1I)nzL?S@)^^-r(2N3eH&*T*lWDt7+5({b9@1CjeBVPWq)V2+Svp`9!bQWLjJJ> zjDdE&wDhVIfZ8u^7no-G5TQA-k4=om28eut9BuoY`u*aCCy;suq#N+no#-LDh)PIn{yWvgAA~ z0OuTDZI$_nTkpWHz2u*^jDN(7{v&?oYQu*IVHyT$$}XUg_cCNvWJkK=&^ieK4=Oqq z3U-jEP7(5#<;=MUb7Jr1t0o>2wAxe?dmMfN=K-BZb$+^q(4%qowoJWG+fCL}MBIqB%#@fY29;%?$P$83;g}?DKsuAmwE%b@4 zOFS6(%U!q0cRt9oqqAHP80$CAv5cm1;&WZ(9UJrHMt|%+iUYE%Z(G7LAij}}f|Ye~ z0)IN6OA>H=gbT7EMTUY_9D@}*1C6zmjdHmLdj~#JTm$j5E+bo(2;Q17a?7p4A)j|A zeH<{K*S$00uOA_qUL(4K6<{5#atKI#MR~n+xbpc3e*NJbxB$uk6Mud+gYGD8$ob`o z;B;;Wew*7d#3N2^SAc4bPj&#|g?zxK8h>1B>6zyil1G`c>pKc*<(_vj8ic6J0{aUn|Wd#WbU zvE$0QcVaapBOFq3uV)y}P%^uV@9bKset&F>Eun8oTyK8_0m0tH16<=NK5{~D1HOZMQa-MMLHKKB z-|_s_K93A0%)m|uX0A~{wvl+F-lV^QEm454p*}(loNCbJnhr!8dKQ)cF7J@>;cJri zN!lgjk6kieC!kZ{l9T-1N;kq@>it+Pww@g`W+ul2be|_k$Ww<~N5Bxnhb8yi4hi!H;)XA*NWSb&?6}8bQme?JfC~EhiQtH@K1GO2b!DdGyu7Y(pn~LjTpU(ilLBG0Kyai zdp7W}o(CGaT{0tZXR;f&S_+Vc^g^ETYXJHni|zs@I{;YW$qhG&Xm0ly3D`h`>+|*z zAbabuvIcaSJkv%IYXJLFIg@W&n}KLRLL(t@1hv5rIU4|9&SZZLAbU2@Q0NfTqz&c> z1Q^9ZI}15p%V{aVng)3F2m*VXSF)=Y1B`il{zY4-vS{an4($QhZP?2lu(!Mu8o~f# zAtdjdgV=9g5h*LOk4V-?bkDpPFpR|+jivx2IR$JO)cIgz!={7E359w93qVxjFm#GA1yJ2Kv3vx;7m9|oJJvu* zcp19P2;DJvTr@%y6fr_0qzyaNfF#rEh>(HA8LxFjh#`OAEMJ}iy_ZFIf{cP>4akHR z)2J#sxI-$Z5QOgUv3m_58wSwu>XC>j*{>qs!KCk63e10X{v!hoBdr5q2&5h2tQVo%6C*N$#aH+IJd zj;ThX&^D_a5^pj9^@@zGeFVVj$sM%tm0y484}j#?fgftXm1#M5Qecuf#k=bqLb8Z< zBFrIx7~l~iyH;1e3S?Y?Pz;VOt4E}%(@>Hz5lX@@zt>W5)-((ScX$mj9|oIX$TOqp z&J&o{{6y0*{^k$EVEb#ngysnrYyL&ku=LGmhr!Oj`SUy>XU*?5r2jSkM<7P{<%fSr z&$S-FvojPjnYU;h7P8$6DR@cU#)4F|K|I{VB2fHQqB`}*ZjMt zVey+!_?xZysyj~zt@%$)!^$_G9b|qUK}aICa)T_QQ6yemWN!)~|J{25j5P38OZ*ym zq6u;RfH<;#P3!tKgMTe&$S6#tuY0+SGjg(P+Kfr&WGQQ8E|;S%!gw7k22_8Mm#6Fa z@bVurh)q}rXaG1Ic>Ek}9}0dD=|Y{#4WZQspaDxg{%7g!LFU76zxh?xf9M6_pY?;R zdH<@P-+U1MQf3{VtxOHm!1155%Zx@j<`#m|zwSR80Ve!nMq?h|!ZN-Fo&?Jrb7%h7 z`@{esPsYk-bXH5jeqBRh)X0B`&7C6vWbT48P|EHh$#h26>xk%5WFM7jbv6Npf@70$ zJ&^rG=)rm2LlM~%gKwD>>j0}GqV+zGy0<)o+>$%;zT@AQ@pG-tDrYu`ZXwFFI`IIa z0lav1Mq@;D46?5V+&^eEppvy)Mo^UL>)N)+%U=V}4n$T5m{oQUcqIi$@xNm=0O*H^%`K!8o`0b~ z^JEPON=o*uU$jv(0Kak5bzj;e@*yS~l|28*R`}27xmJ>x$+K!AeO+g*`ET28&*1$t z_Iad{Q^VvR^6)Pign+F3TAdI%7YBIZLjk0I(?l{q#afTaVL-2?5dJ-#ochAw(V>4; zAK51yvLE}&D9}da7cV;u^4t>Y_kZ94D3J9P>%+iz{om2Tm;3j$_Au~6UzQ-ZzVn6f z=6~29B5PU?MzY2`KV$!$f6>plUy1>Ym0kYGNeVQ39xAe?Xr<2n_5NrW?2hOof7J26 zHiw<#znCjze#}TMTcU?Md?eb{00KS=R~-`D3{KayK=j9dJb$89Hsf`Fv44=xiZjk- z997Pd)J@h=A^xj>*)Z7Ppw5R$2iPb4a)}Pd554TJvN%GrXD{V9pR|2GjQU!(Ci5kb z@%5{>5s7^5oqinwRw(V;)|EP!GdM%WL7q4#K$vJxg^ZU34Kzfkswydp%~A0h*tCVrK-fJ1Ibv{h%Cexrp92@&9u0;O4Ltgy;=& zPzvP_trLq!e18W~;e=Wz=Kw&6ksNKEZ#e*Q7&LJhv|Gu}9SCGo?Q=-6;Rt1B0y)IP z7}PL_*!fQ$=ZWN=3g$orYcxx=Vt5Y{b;xCc9Sj2E%z}KN*lJHv5qjm~fudIY#1&ik z^`V}ikKp@5Z3$kD@YngH0wfuL5fJ;_^=Nth^;g;kK7UaE$j^ZewCJz&2occ#%7*}y 
z#C-L4+J*?Yf6>GIZ}WLusX>_kmCgYTxDnVR`~V4%5F;}XYY>mHBf&#t06HM*K(#>* zc$tjG2>6=d(awbPBzhMaX$QcDTwx?OyaSO1GJqZ_m)Ni&>4@BGkZ7#{D&I7CldAtG z-aC;vz<+-)twDI)xNlfl(;un>QP|F!X5#|@MIe%qCO+a>6ve-H9|n!}3x*NDX8 z`kO}Vw|;&Eml5uOlmRV=FVf!&F}4sp!Y7u{{o=Qk6F-~({c`aS+SL<2Mud;z-_vV3 zX@8Fl;t^~JKQ#bUQZ8A?FDLwQfB3VG--V2$NZR{{Tp>WN)CwTdU%C2Ma+H(Xm5#_w zNBWu}R_3z%R|6RRb`3TqklPg0qzkdil0zYzyzx@$xko>FmAwc@U{LGJG9S7nE z{_WR}(64f{(tX=yllWHOay0Q%Mfq1wF2uQw?|?Rf4|!iSe&r}z=Z{Xt^V?2xzbbO1 z-FJeY;*oU#kEEUJJj+Nt9K-=_B=kC=dl0$20Jspj{mzH+Px<%&%n?&>{d*P>39$^$L?FQ`#b#*e3Q8Iw1;3h!{F#?vX17;3ZoJ7ke zSBHS`8n(X&eSajux<9J(5oP5a*X|+Sa3v@d$UpJMqA*FPU@+_Ya*{c+>vLuYV2rE8nJN z!C{0~8sZ;1eQ$ukPsD(Z>{yS&Z5fLgg<_3-u@-!$xf9yp43{LFK51^)iCFW#WITTA zTx_rY)>Yxr`TAWza=*%t|NX!JxBuhT^=h7q5R~Y5L!XKKr+K-~880 z@=E@VWKv^yyD|TsCd2i>{+}26BdcmAh;umzxKY&p!^ zjE7i8opx4aRn|t#fYRnm$<_*<1>{@0lya%6eLeT&v`;cA7Fg}x4}TZNk~aEq+9+6y zOc_SvF|iz@#Gd<14;DpzRJ)80Y3v!gAvn;c(#A!}%QoEJ%pX^!^p!@^RHvx)`>{OM z9anW+J}rX7(Ob_Q>5}%6*Y22>n~(ZTOt@_JZ&AG_cgY)?&6)F~LJkz@2Uu0c*^`-N zkUw>Cm~PdH6Xi1Xs(+zoxQ?NzZV}_n^EAoF=U%6d!j0NjMPEjp90^IabbZTy{Ae(_ z^ix`QAilaC=laJh8#X3&+Fr8$&brPhV_SoEZo3c6ml()(ZSq1d$H7yOpT7F=C*u-q z?_oe)V(Joyl%8zg$$WXmo9*Vop7VEn+I@D~_9GUheByAy_!1F54&z#Lx_}%M}1!U#$@U%!yCg@f+^yU%-=C_(t#J`&R<+5;;e zhcQoF*Z1{P!hgiiHm!1^&CfodKF>Djso_jzZ)42$GCH@fP>=0*4o>uvK2I-iY|i)C zsV>9WlVq+{Ks)Y74HY2M5$EobZMLN2`4Dm8X+t$V!}C~SVne6LZOMJkTUv4GD##7` z%uNQRsGF{QyrPDEM#Z@*j!L@YH{-eX{M}~2w5B|v103(PV})G-d5k>>s}8*;V@r>2VR;{KcQ|R!S2|Ed8WxV9`UgLa z-RN}=WqA|8H?>{e+w>Kp_$#si|%rHiiiE#Ivpyl zK?5aJr{f%}SF9-0SPc!uMAO~dw83$6y1g$#>@c9NJBv#Wf`7W&A)DfQ;z!=psuP48 z-hbkAaV!n)@Dek5(XC>`qMP~V_xs>p-6n>@Lj5>(QAa)NK;MI99LM2y#UR&?DKVvf z3#!=3s$id*jv1Cs);Ef@cdROI8KxCKIlE_f(drdI(33Qjwb^ zW!xr=-?o=df1=?Gls+26)ICv|bH2RJUVkW=my;2{kC&@tJ~!zmcXP1`hq-Z^=dkgW zD5?h2(j_~4XG0}<)7Gd&oMcmxqw6P{g2Ovg!rKO)ji9E8Uy7!1ro@ICt9mI$*Zc7# zSO7f`>YDly&DrfJyw z=WxFt<6F-hmeZ@!582J4x;m!&W}M6pnT3pDbfg*1V-}WhRxB8J(AD&wp_n(+d@Syd4Ak(61H?nTIU4++Jl|9#;c}8DQ5`S&zc6kU^ z&(_1;|G3(%(NB-v4k{U*7wrk4BT9~n-l4rwWQFB?W45J_Tm`2a-9;@<^w2Q&of=%v zWm=v9G}H+t+eyD_eB(bW{lQ%?xAWs3)9j$R=gfX|4?14pqm(8tD}dGCWq4>Lpze=$ zjODFd^%ucqvyf7c#bRn!Uw_1Y)m|!8eBdyB4CBrTo^;+vk6M~`7uvQf@sXtL)WGN# zKH~bss?YoT(4(Qdg&SzzR4Z{uN6d#|%Q^DwnSRj=-L`q6S*h~M1RWN~&oiS=-o2X- zaeKPxaji7`SSdHX$6N8B3wSaE^a_r%5(id!N({8ZE;O;^|VU?{Q^)5e%y~sx;3W# z>G*nd!HsHZNlByk_Du!5x_UQ|0cyH^x_{04jPE@MGA`C`sXC)}J2=MT zGv?r1^{kJRzWBu_Qg$1s$P2birvs|rLp3tN%xhBJf39F)cUK9$KFYbl`8nP4uLc=z z#CDvciLV;J%r1Q;H#h3jt^-f^{%H|7D-Tt0l^Rc>qu#jglwM!C5?waN`_Vqn&XP}B zc=aH|kAG6Ldo74D81}4=?OtfS$l{6@ymr~Rmvl$qVd3WfP1m}yIEsxv3-u1k&4H?S z;$2dY$A_y7wS)HRB--vJ;ka{CXUD8O`njYt;IV%+QQz#~Je!K#Y|o+#6sDX@?`6(| z>lORyBr8ok6_YY5-Bu}RrxsI=d|&ylGt->Y-+w)GuQL87+-Y7}K9 z?LWm3S{qnPsb)<#5=$UdW}V97pxCujY=_(MFkT;LorIF73?ySk~Z z!llVycZ%=IaG~Y)BwezQ~%$bkp+dB`u z==9fV5Jr8dA4e%+FK)%}QVI+QvD4*Rn16$9jOhn5t4@!OyQh`gA`XngU?B~WvUq#dOEuGq4753WeS*ju-Cgn(^++?ZI*eS}r_I%05XmyFclt^X9l; zih2VazOP2%Fpo6F2G&02Umm*am%g!g`MtU?XM6u@TsPdlZg+L7@zDUWSyCiyZ(qg5 z2DQ$odERfW2e^p6@^2m2JU8-1CV%G7_*iW7X2Er6>(1oFXF)Twm!Uqj8}NM34P5_Y za&d4T!T_0eWn)dbD5h{tw{;{RsnV80!-B%?9sBzxOoAD^8vpE+tg=Py>eJ$@UO1Vp zy+5%|0`CuBIcc8{)83Atc$>}nxb;vgNH=R1k78FFyGzHk%1K|mVe=R!Q-9cK6>j!D z!{J@oR-w3_1YpUpnHwfuZ#O#j4Z4AWc2C;n`yr?xlaMR{=@%IM@>#mOBN7wi4J z+yj|+d#4(nv%&0ZTGNa{WPc?s-OT!<59!*bTzNhf1{bFc`&IrTXQA6%0*4Y zAm1)GS4rQ|aykP)h`ZWX8cX92D9GnOFT_|VTm4>raIw#>;R&a0dyU3Ug2%46+fjJ$whU?;B%DJhcr4$}E zhtgPX71Uf^%~;{{oPPy*W(6wDhh)0@s=$>ELL;m4C2xuX9)caq4jtSi9=1X9-jZ>vh=%lT@HipRP7#?NRl3G!A+N}n(cyK!e;woOn)0S>>l)nmI%FS=NnZmk3P!N+ zW_g(X^m%=#IrDCBfp`iAzmN>Kon-%g752h{4@p(3y3_{!ByNIdne>{sbj?F3%?>7y 
zH;Wf&%9LzBz<-iUoX*g4{9$O>#76D0@JsQWy zR6{1dDXg#am-)4u8(VHu0bRDOBi!3W-W}w2a4BK1-haK|m5;klH|BmuX5;rLP5jWl z18SGi&h%Davw}_M)9KEQjm8h>)t_<*t3Zmi%T@@i9eXo2wnqnj(7By2WppSEcHUx;tU-PC1&AT!}$^JdE>`l~~p@%za^calt*(2v@a+1%1+tA9He zMvZt=@Pvn>Z&QC#>S7j;wxaC${O;Z9LIA^(qJR6jG&pEJJsn4PYp$)&lfN9gtxPk{ zRe4d~U^||`;l+^T10O|7^z1u-vLD4@6p6SPhaB4HYEnPdC4uTI72C5s=^kbn@iK|= z*wKO;D&_;5f!d^YpgqE8S253ef_mm8C1Pe;V$D-@2OHIPqZc-s=3PrjpTq)1!N;i8 ztAC!%>dSzS`+V_^h|^D{cx>z)Kn7o6?W*5z17Fw=2$&!et0tTp7Hd8Rx3H?Y@G0H& zWSfcjTn~kc--_D~y7`CNF; zzEpb;2e6B%NL)1LEJyckqNVjbOc6Ev$EqblaKgGhBwal$~eviS2#1znZyvp{!nQvVl3;!~Lr5PLLX`{(tm%?i1lu z1+U(KmJ6fPG6~OjOODkVX1{a6!w1?ueKdY(oI@x3fE#=^JnzgS7GK;kNgt(oV5QsH zr>}c^cZnm45k=VFC`+B+VHIx_YdY>6oDRmD*?0gmlR&Z5&d$84z6>_r8&g_t1X+~m zs8)RMaVg`&q>b+;YyBBbJ%6ZqczZ;~^?CeU=($k4?ESf!vky#}joXs8?Cjk!zuoANrOJSC-pN>J`>cv%CDcJ}9spfC#Zr}F4k{myM zcJ(uKLyGhn_61Y4Oj^vBO3*i=cap$9>kr2Gj)mgkdz*1{Wl5^~S&uJYUPAGyjfk#k&d(*5sgo25 zBC}Q1$(rLLNb5TFl8uo#QxI)kUbx*aNRc@e@$C4viiAPR)4+r7;u)Y3pW~;gDpJ5z_1i-_)xVN8xX_$x}OKsY0_Dy2b zwb;l8qS}MW`&MRe(N3RzS)wiv**o;9J>Fe+cN89c($nVP2Pi)z_g!Bdg^|pg+Dh#wd>sN^f?dMJ_;BE$rObKaNT=fMqs@B)8_J@si+3w# zIK6J@W`tt?s268fU8<|}RF7lEM$5j$nz5Ag?e*5`<3oR4mQ=qDiWA|_uFv#O9o=*s z;oa8V25_R~qayDwD!=TvJj>9G?bG=d&rNL=oqE;)G=Heu7PQ4f*VBpZ#``teYrv|~ z4Vu-mzUABQ^1OChx}%d#BEjG}TK4BAv;Gr{Y#dRxk4x$^-8Kk{jd0O}8T1nH&1*e0 zvsf(9GGosip|90}f?yRVilPP-Qnm~4luRDgR05@w-|8FjSgSKsG~qLJPZXz@Hauo_ zO&7=3OMm0NDRKOC)$lMm#h@g*|KK#sut43n)~KB-^d^%|On}>yxev zX*&yCuB-g37ft@T9r7u-PyRj*3=5X;MvCmnzHS|%WX`Xa){~$QZPwkDTc&@(37=#? z+TJE-Q#7&h(uUiO3m7`JJw)DUnO?;*^UTaSv}adg zq%ArgyW2IB+ns6VHFq^O&Brq$c${i0ttBlzq~KlR=2F=inzCVM0ZX@MKZ3dUe7ru} zYJVz5m*V6yv@0~bQ^q<1r3(b@pH^;?q)P6 z!*%qU@y60(-_cZ-HTIpYx7j=O>{MHB+yy*+7gcZjFo)|oXWBm7-p8Gz&E|f8Z=8WGO$+RO1&znjzfqi4 zpC-LJCMW#5y@3-aWe+C))Wc`15mDs68o7N zg?M_E;2tdeqwX0{O>r3P^nPsAqt`Ak?>I3J$vP)PbDw0OcvyFM1&$B*Z0|JTJj`q~ zKfI>tp!RC-9j8H6&bY3w$7Q;Q!mu%$czme1k7XwRF|4#yh-0Myc}_- zrn47r^4HZluz}8Xmur1MQLfUoJ9*wQcI~#hTF*H@ogx)@f?|khn^n2%gnxV-v{!VP zLFVYWA>iP)JqORbc0JaYP%S>H(-%#VpDh`g%f>G{jzMwo43R`%{1M1}#W=Uif)-t! zqXToZaW!9v#nmz4PZyH>-S~?_KlMuuD35a{h|% zjr^de2j+J`Q^Z~3w_9KT6n`I2>DhGBa6;LVcr2SVesu53*6jV_LT*W#JnY{K9)b;Z z#9gtq`@8NHExNGRAqeuYK4UwN*aGjL3HZRQx4egL*Vgqd?9A(4tm_t;Z99Fii|HjQPonRE@zTAvhksL8oeX(=14q9V zd>zlFB}&YqqG^asmY%vYc}2xjWVB6#wD}f1I9qJx1>a7~+mNXOpRdvjd(`g9H9X~x zIG#(|#gZYh%o``0>f_(!bMcx(Zg=BB&hH;XpvgxUdz*)Dh)=;SeCa+KcebpGu~vvQ<# zTX(x;GfoPn8k>y-`ggjK;|ZL({*r9=m;IILWRWfyOv0}?qXPKzq;#j#^I3}WjRv15 zdrpLm&#b3h3^E%C(OCQbXhltv`KWH=WCFdTrY_Z#&$dqYB85dNHiz-1C`khCcl= z&@YFbF}xv3g2=x;^JjfgZ*|+uMXXdI$QQ|xlW_t84s!G zKZ90Q3MYNQBpk5F7Nzt_D>^wBrkm-_tA1Rtwkd-iH-AnQT$;!&?>Z6{HGgVb!?0fE z+kf1W?Ynwhw|GD2=cuz{eHRlHOPDO`G^jGj1ePl~zoR>2U(8}B@8bOV*y}wq%=^~K z8#h~2&b)r2E&EYRfw@K$J3}GAKXQGi8U;1WUcw3SF?Y&Du6LU%-S35`dBX3QJ-O0O z9YXq!w}00eC%wX5oi*Af&I?T6@YZNdiq8Mv=Q`>8~SASAVcW6rz@+FPmG6yF~H&u-QO@62c zIfC@mo-tVZW`tX*ba(`<)En>0*wYXHMnT;`!ad^b6D;(w5cBhh7n7w&#PT9H zPAOyigRF9H00{rb4j>5y=R|uBuNh22fn2191_R`B>RN3NLn7lSiPXb!|g? zJ}#ymzNe|PIxAN((z{MG&h;o!J=@*XHJA%_il%Iv9W6q?==G^hLyqx!2%1w<-yG)t zKCp$DU(gS=yD%hTa`?8jSpxAmkJuYLD}*Ere%NzeGI0LC*@0mnq?YwZ5P#mW zoePT05O(5=YXa(So=&W5GwO=r+@Q%LyQlbalMh}dr|N0D%HBuB2WVdXE=}xtx6Kk!{?V;YvC8dFqn_De? 
zG9|aHzsVrW)MJCg5dOb2 zyxd0#m-%q$%^~9Yf}~8Pi49|P-y8T@Dzm0{pjSukp!vm`g(!FjacmKZK#(#_LsSI& zxP<5ge2c@}GcKM)7=5vTD{87>GYJ76<1GNb6Tyf8Mc`5lVFXO!L4WP&#EJ;KQh%FC z6AcDdB_?z-eXn342rYw_`xt6v&XT3nv$SA%8XkpgoJ-L+(6vY=T)0~MdH^uHVG2JX5-}13ndY7DP9xp~nDc!+;ei>Shb!uWMfv-N z^MnP%J}92e?>fr|`G3j`*ie7Y6eN&J>MR+WErLL z6HHAO!a>o+T|4$;-$#0S(Q8hpQdB-D*IqZ<*gj)9fC8Z3_J3SP*`JADbeEKGdwbu3 zs#6#G84)8(C%ZD$$G#*v6|M0!j!F;GOIq+nFI}tvP^5gA!^@?bm=jec5JE*-Ev#1} z;%+ld%E)lTLc&*OjG)Rnrbl?Xl}Xq%-L6+iS%|jMN zi^-yWxSk|alxlAVNZ_+CL(j)N9=MnuON%(tHZr&}p#PSrWnu+}GtpolbTbMDOVHlx zXzA?;Uw=OT;ZLl3=*?%74_+#Jp@|W#|bl&46Dk5~OY9#xA@hFJ_vLA4{ zRm-*z8j_2uf$Wxqo}2_5G-?pQAfnRliv#2=OdQtIOe6jHU&(8E0uPCN!m$+Urcwz9 zYRs1$yvvTW6NDV@3Kuf{i*@TG@R$@3sXwO2L4V(D^bR!TClG7=+#oz8?x+QN&J8}- zBx0KdM{}802>+$!)BYEdldW<#CUyQgaH?q{dd!}0Sb_`lJJ~t!Irl-4N=0}fc5@0d zi~Hf6odOCK)z{8ds(Gy~fYCs^X3HE5$*rgB=1hh(Kpp5F2p@~2lGH7z?|9$ztA4CG zE`Lmurh&%8DbQK=TJef>49UQ{l=h$t5?_lf|R`N_iu!U;9OKMG3MtTY0Nn}0)Rf9?uJmFwL{A9&- zUZGzg31$4cLSvE^{HTO6m1lBdYroB$w14pTc9b(Inr7EOF5fOO$D^*$plrUYLFc)# z7|Mj$S!gx&$&KfYitZZDNS(<8FY#&tAs8uw56$5EuL}ygfXq;{`5{cZZmDVBG_w3M zG4=NW%xWqPbx;eDV*!`#i>gR(Gz4^^bYEX7OuSi;vOn>SQ zk_4yGa$pe9L0-3~`U+Ohj{t4*ertKjD#CT^vLtfg8Xlo_Pr#?4gNed+Q&jF5Q1w>c z-UAcKpu8_&VeY#AD)nhoqmVT5R6hxz>?fXwHqC^RZ$})zw_#I5!-^F+$NMI|PBuZC z{$hRaxsZ%agTp4Z+~>R>OY-?ygE5^Wa+nbF$AhMVCCd%@ z5u?l|?jV&V*BjK14l(aXOD)_vPc@#YT9+GHUFmu5!V7t7oKb&o;{^NfOy#XGU<@Dx zt-0gxY(;H9^h5&(`6Ze*(DnZ64l~zZH3wEIl5LDY{>YBY{V0B)6TQ)e;eQ~K}~0-IW zx-xDHzSW4Y@#A8H%5a$(JfI&8OFgi+-6@rDcNif5lgc?BW)HBS)jt);8O%qajvt8d zr`)n8$JBI1=P3kAo<@@Mm)i&fU{ke%M_uSS>En1AXa^=qB7dRyoqs=`ktqeh?VQTg z=N!sPbfDY;Xs+M$yk}TIQ+VH2DazCZTKyAb@iJ{bCEN|b)=eI(h zWGXcfjzAK^4xH(py^ncSdJv3!_DZjDzin-Dxv|s;eZ+qeFI6Yl|MGN+?WO)2?e=)` zxlW;L?y$t-mWkW1gFgKUd;G~H%y*vgv?^D;ABgHFR3CwT>3?H;iK%K+RSkgyeRM$q zScC3&O^}+BJj|-)ffQR28S-L4=LhG9C3k@!k{j|4aBz3;8(FOUCc4A-0qeNyg43bR z-YxUPDG^Y)f%?kzyQf()On*OnxbwVNyHWs(N=RJ3#!1)@P)2PJVS8ADZX~Rs{1uyk z0ly!CuTWIFjejLC162n;D>+yf1?_5+BNZxs0)F+CS4K%s*zh}@Y9o-6`xQmyCu$7) zF7->sD!MKNRSNP#s>QA#{kq6o0(`F&e{x9GXb=*=HjGuxkRfV1K|(K2BPuvbfd{21 zmM+C^Y?5Z&U$VWUw<3v-Imwzp<{()3!fQ#{SS6ohlH8!|H zxnjb1@CRx)4mUqY0;xD)2Oi_gBi)kiH;Rk@GfN=ofz8O~@ZSgI&$n@o+7|&I8 z*0v`3hDQ>ae@(~F1DCxS5aT#NfU9qkL@Y>mo`(e1BJ>H&=KP`y`aJFx0IWoxz=7U| z&-+s~S%3E%Fo!cZet%3wstzY%wmV0r!;qchMPbQ;GO*6D$X)X+#G}H`U)A`j9)zj1 zdzfr)!tQK9`(WkWa9uJO@~N;wIOmc}p9oh1XF}+mxrEvgbG?9u=};Jc=!hjZ)g;U` zt`d9HraHTH#F+)Q(lO-KIwtKpzQ!OKL!ufw2Y>7TqiFnp@DG0?IhsZr|*Z} zFQBECdl`d(qYFP)sx1p(G$+pmo`Yjd%9#t>>Y;JkF3UuW2UI9*(MTb*>?lGYrM+Gu zK7yj6$a3?)b%*W`5DPB-@zi4%Vn&q?sBp3_lnWuNPz;e-EKSsyC7-PRuun-hra=Nj(+?hu#SZu2xLRe`)tt7I# z?C!OAXR5j3jZ0c2rXGrr5Qm&a#xEJ`>aQ)h@LgZ(BDc2FgTjV++36;;CGXfsLViPZ zP=lU0{dbj~F#)l=fnpcWCNgI0+xpmW^naf?(f9&Qe(aKvQ`-o@s@7hOn;Zf!IGKxY-;M!fiW_GnlbJeKF7%t?bOQ=o` zSr^tC0YCz$ITv2(l6tTM6n$9NaLcuusF~QQ?$a_R%y`)S3gYV>Jbz=Pszx!sGk^3a z=8M&hEIn)W_GFh2R=<3|{R&Joyc&Zn(a}d+3ujGsulN;#pe1SdjgFdLR4z{OtTnMN z*gDsfT(SBNTpi2mP%|bdr?-x-7qNLr^T|V@-TI|c2 zHp9=Za~sMU(qx{huG%zPBp@_7X*|5Rsy1F*IoFMF3EjO27b+a{fqbH27l_Gv>R-$Q zgBo?xi~HCTc95gJ^8Fx#ZX>miWs>;j;UYIL0_11%imKH>#Mm}qANU?A)_*NIY@fPI z5TQ8N!Ak_6%nZNQDBao)s*jU<#(}gT`=*m06-9?US^aV_)aA3NbM9`nKY9sg3I+k! 
zR%rv?D3!A!x3b^%=*<3A)WXf+KaCkcBARko7vyY*A%s%`H*TuE@8_j+ClwWUwO5%L zb}j-~E)dQVizH!ATGqU!4u2Ao3M^^yFFFr?2haP*k$^z6wTq%oA3_NnJ(VQGda|44E*jH# z)-N+@5x!o3BqSnH1RVX6-|K?ZfIRxYkdL!i7s&Bj8ZX?GB@V=3P@f& zXNFRU)x(Q#HSKU2)_-Q}>hjj#vwMv>6Rt^#6la&MIKRr}nzjQ5YJLt9bl!+-^+I_Y zVrD4eQ$fYz^S!=}ox1cEx%gx@uC+$nLf9Lqaqc;9V^R%j>eB3_^DR2X%@bT z4>Sk&A>>G2+akD(5I4GF3C6*B00!hWMP=b|{_U!jcwh54pnp3yuYSD*nh*3In)2Oj z9rM;&NR_C%#S+VB#t=Y!L{>a6Y?w){Hlwa{sH_C&0HE;Xm=83^!$pU9D=AlRn2Fr; z6*=x#>MC?vrfm%mbYP$t6yZI9&ZtGL#)87t1#8MTFlyL08Wm)c4z%h%vFw4oA9@vU zg#$=ZJwL+v+JDk^tPFEc%1A#xniGg}h;V~Vx&d&K#;(z@FlOfOyxgsIT`5JefbM?# zR=((ngEX?{zlT*)D-2Aty{alBFs(Ik%l92TSJ0?(wg5ohLbQ^KZ;}v*kc+8`-?F0U zgY5yX7XI&Nfnb9r{W$umaT#!u!0Q%r8{CZ0kAoEOEPn$4e`r_C2h`0H$?KBlq)Q9_ zP>xf216@%!T!!{69!-H*1P?(@%-@eeI>2SL9t-wX0nX{O48RW3;IQyi6e$P$>~MGb zU)d`LV?Du;XktiQZgP{`A{i45Lxer?B~{qMtc{)gdkkGeYD-FC+O)}zN~b2LrD`9z z7=Ws>bbo}m07F2$zs|&bxtC#(Gto`e_IamuwhNT2qrU&OfQisM+NHA7{zx>1m*F5i z6lcbJ2P`#evperz1;2nmU68sox4V|0cz-Cq(ol*%nqR_8=ShqB$iF_sHUHNTJV0PMbks$T;;tB)rj{^1=|Aoy0yXn}uWZF&sFi$=qe-3vd%e417J zw3%M$;-r_Qlv6e}hBk;`B^JMY8uGa6!`n@?`icUFj0 z(x>79V>ExH?f7jWVf`HG#<+CEW*bOOLS5rOU6IE(;zRXOnj39*SRcw4e=W1N{-Ce0 zBxqLYffNvULTwD-Ltfc;G?GTL1IAu}`d+cXsrX_SQrFsV5LxizLicsCYa?sfQ(Yw8 zMxdTX39?&bGb8?W)Aw8uI$M$LrrkH}9cZ%Rcy)i5So=<8uH!GGfzc4MJE7RD=`2ZG z5^QLk$ZTZgE`H45>GNn+-vBlLbSh-+2}yKu>vep5<~ZV3991W%ZnJw`({|o_LInp zOf-KVI_lnw*6)W?rB>r97gbb?4i(AaS_#i4SLzz!^>};Y(lG-cyYKozaCYRrUJ7n$ zO^y8<(<=8o-2oC9xy`UZ)EFPH-bxpw&(foKhvKwjbyCTerOX)k;9u(>$T#heTQ9nU zw@VlZ*D zRJjjnO|)K!sw?F%+>-W;V8e|E&CiEsh#3erBb&zPYyohO$l#g0@!&_hPyprJ z$oily9wk9nUM88POXl;eJi;hDb~@DTB!Fnmn!}?8cB88;!K{WeTk3&8)5$NM07miV zU$^*AiiYsdKcD;XmB7<)Q-Fkh2&I4_W3PUKs1WVzJtvngqRg^+Vs7GDA9 zTH9DvIccBb^Z<(6-;A_< zTv-$R@0>}uz$1N-e)S##X?1@HMtRy*-#)GF6sU`J&?f185V!KPIs0xl?l$#DZ?czY z>{uPvd2zDJ4DS=PX7gxmDcg3j5RT2cV{UOIvZpaO{&wH16wt)y6v~ z(`c;+9qQK?p5Z(f3!JieL;D=G4%WRefSlyu0!P05MhgPXkgzSLBm^0Qnn>!?K^49 zCGY$jNqb$9s1_3|&Q6JS6hd>7eoEOaqi}i7QIpW$?iXk_2jHS;;;?jFSyrMu{8eDR z#R^3tuxLN!_zhxTtF(W158){r3=RJ*=WG)ZUveE`h3wCj0WU@SP&MxY%dxi1_|R?O>g|jr^c>W#Hmir1#u zN$?OcO<#_uW9tO^dsaec+U=1ZqB9`RE|FfJD>z~S8t!RVrfQjTm7`y{>5=w4xvgV3 zeOOfMVM%>`P!aMi!k%`kps;edi}L>Xuzd=JUnfhNr1r1Cw%KAkh09A4%)}%GW}zfF z;~qoaTC9JO+>+s_lNY^XaVBvCM~ZPS>|;MVCkwO`Qw|Wf$p+^xQQoeZvDyRqe`62g^ zH(K|z8@&~^Q-E?CCD7P;oes=)q4ds{Rh-*k`PzS6<%mnzLR2Y#_WV#NWMPr8`f!r2cCirN$0HZm-b-NB_!-ZY;0;>J$QI5yj z(3pQrTahdv@;RE*kgg|o@#TDf7o8W*X6(#56>qI7U?ql2!)-|OohGL*`oh7o;693d zq75O>ZI$ISCKn3#YSf~fHPJ!M{s9H9I@hrMWjOguk4 z07L|LMDRJe9$W{kN21(I0i48FuBxYU;E5p_>0|eV4@zLQNB2l+|DYqwn_;E)vPOSt zgX;5!&TvTmN}XDINv8ExRt9pS>sJZo<9K=yFGPrb*~!E(;_MXAdpcI&U}{I`JcT$zkc|YM8rX+BcqI=J5V*Sb!;Sr= z2Ap|XpM{VE6tT4}xa;=r1HxeA)YE_U8GMhh)aG)4;t6G#w5+Fpsd6^cQ4$__HWYRB zLmtD*Aa5m!avO6xluGw?I|#s%D}G+q-3Sm?&<>xA;O_AlX%Rc?kNpv~S7~wO2}r9F zwgH-nYPUILd`nK4{QF2P#qZ}~Qu?~P-138qmv%mV(IT8YXE$&9Zj;E2Wc7b*KlA7* z&ejB57lJv8k;}mkJ5m!SI$9(yk4Z68^XKxW71ebNAyO2B)PhOQF?8$Hg~hL-W_~XU zD2&We?09%Hww0b4Q$qF-E@>4XtJv^Kj+4~_%wg1er&A{E1Nm;Ed(dRbDOnbapCi`5 z>}oZlpcZGX;1^;OMQ)_b*P>^u?EthAK(m|lXC(JQg+d6=u_=jh zY4m)quBRx8h{q|>)|;^Trjuc)psDIs_%MWo)Yky!jw$z4z8XT2tAKw3;iY1 ztpu@tFm*QU))NW(mxzDvI`>Abk;GNKV^mInQCu@>pgLzBs!5;{8?i5=)9!?sSNFiS zW|*o4f5LU}=4vI&Ac&}3mRg06xjUEoYXWsBP!A0OL;tY;|!- zncFe^i@PR-NV2<}QVPp?nzo!nGCECU!zm16GLER-g3^>Oq_uxjQohZIR&4x{ld`$T z?X@mm^O*2&_y2lRXh&&h#uLd}!bT)1B z4MNswuP!;2Q z;yNHcNQJJ9Te~6ZK(4WPJV)2GDgfhPL_f^LRBOH*f$x7fUxv?V(vXv$I~NEJvsAJZ zf!P6r6`n*9M6oCl%RGNtdD#TEWHCgiU_psv!z^>M;z47<$GvSoXHA)#8h%-OA^!V8 z)Q_}qF;+|#0QeNNu5aWF(tMoCX{wOgf||E}(lE=*EHA=Tu#iFMVYJ28UKCLdl783X zRiHHu(iML|skcK@N7~2re!u*%$ 
zX~KQ}PQ{8iU+&F`c`dq`3ZpXFs1E3eY&Y{-R5qH29>^kCt%FxF+2`JlOA%p6l?Fdc06 zBRzXvw@75553!mYG7+>}tu2NHwCM>z<{JcEbYQ@(W)_uLen8JH-(*OWYEu3{}yRckXaro~k-#oj#$j zPSEf}DN@@)ML`7oVvI0QpvbmcqG+9D(L0x|Ewx2W+@`*hz#^#mqqNpk-xk0Q<{3->M2g#1ARB)OD#r;~#F*i{?k%CVJ2sbk+r&#W)yvaZ{D$Ln zIh^Fr``AQhhjQYkLH=ft-iXk^53O1VSt%7fUM-imR6;V;Ay2fQ8M%O(D&A>xYTkll zD8>B%QBg)94+y_;kOhH}j=}UX^H+U987$kxkt+0r^eYeYwECaPKBx*igeQL;>JXqG zgdLS%9VJ<-vW<9Pa|V6dHPJT5Em`CYPrQ~&3Wx&_r~EJA&tAfDTwTpsg%_>~jGsy? z+qLPI-ySM^kSwB?cB0-~4P4hivDj?%^RtJ1q>y^+d?w#5!`e05!fELoUh(CeA|qX` zWqUI4O@)O1kMob50L?f(Y zwEVjJu5;E!*^<1shI#tzlY!7uJ>(`LDMF3Mia(x{ReSRT;Cvu7dKGKxJvD(bGjj(_ zvv&n*#QtnQe7+*U zU;0YwFF%riw`BJiDXVnAK^{xaDms+?z%y5>KB1w+H%GR8Tu10WTeb#rB-n=iOE)?3 z4&(CKl}VJsAD`BmqTEwZ1Q?b)g101E9GT5W?hZ`5KrP*oO4TuGB~=La-$*kPpeaJj zI2q|OILY3|?JqK=-o<|fk~*?V=CW``7r$oqKx6_jKK$ftfG0W0z$`H@7N7oDN@gRB zH@%BgtJz`X-|YFo^!?y&TPqBN6_UtR(7ax%mur)PXdnk6iECiPtSZ=fnq2{ER7>A+ z)GnCZWCEkwuC3tUfY`YIT@U2hteeIQ5zYNmhP1FBAzA{YH*tTXcx_8$B8nYehQPhe z`(w6GYeh*CYy=&1F(cM-80h(hKe>bnynJx44SND}%I}h&Fw2W$p|^ive~G#e+(A(d#3EvG?TTUG zx6Y00n!#W?nuQ%wL;5?*EFx|+b>g3H(u^Tp2>Ga}JWo3csx`=lECvwkfS*PT4 zQ_7~Hn7StNn5};Z#hJ%(_Lk3v$}LUQ8AgS|SXzId4D5CP5a9D`7&{-~W^rHcxhAnO zK>V9#(Bv^aKOd5LLvSVKZK@Yr8`x>laI=B4^6n`QkkQ{4d()NwY$r&`wmZb1o*YVm zxu6X@Uo>wl$N3aVV{QxA9L3V5og-2W&LMXIR|||8-7ZoV*k2?!_*|q*Htf6$xAT15 zU^susg@2%8lU#h8)a?V5ONaeNHd~W66C9E~6$!N)6ukQ)-UN#RJ>!uq0}@9UVZTvO zbtdKbt@^z|CbtkVf3EgK1gcYpU_%J9X*eHmFI0A`wA3UdAzXx+=^oUuD_HkSMhWBt zL6paNs>&h_0N@LXTCTzRMaw{cSs;v0*VNWW48JO7m2$+-TJ5{D zGdXIx4Y%LPaUgT0^Oz;3%JbIxxn+NHHLkY)d0bN6+-|(jEtIA8C3l^&rD{m3x2;UG zQR(1>nMw-K=p*ieohiE0&qn3n*k5Ewcw^uMr$@dm%$9y2eKxL%Y~+}~SuB(BgMTY= z9qwGOjZQF7wUq^2*Bj?as6yB3qV7((uSRNC+lSx#N!8se^R=abzVCe#3cY_ZCuVg> z)o~U8XHqDi1D*hZ6#$rfae4;{;CgKcJ3XW)AB=wi+y?!q`d%A#^t2u~DpP*CQqBZ{ zy<~P(Z`S|HH6G&X8}^Zz2Q(271P(s*d5p|SZTAasEoUzK&LpioCg|YR3hhcRIqYo8 zx5>6R=>6{g^TQ6ewgV@10CIm?pRK9hF=L!i7i4Ulq3T(HTgew@<~cOt^S$`>EvGO? 
zEOmsi6~h<*H{WVVRhtq`6O8j1et>-djLFNzu=)Y{Mg96kO<)AX55>*-ZyPa9wCLJ} zY`~HkA8dSuv%Q+NW5|M17(`5PYF$46UcKMofUJNOB&~h_)e|BEP}qO6u5M5f-#`_@ zF)a?2=G^+e8hb)g0A%V9CaTP4la#HEz*dBi1LF3|l_Z!BzuqXks(F5NQ{?G=sRHNY z0{R{Czg%!Zwi!Vzbwig)n*KZ%9WxP?sJS)gQ@HZYx`opTirnHyM$VJveE2K zPU8xyTEg}oTHAGDe`J65+8<%GpUVx6-P*?_0TVa)f=~&8!-SUUfSdgjX@tn5m_AC` zq2`h#Qvm&nnT$3AcW?=vcEiy!e2)$nZHd|0o-~i0+$NTeGs#Q-*Z%mP1(5XmPZ>q z$C4@ZOG|&}RmUGax;?{h*v>ct8LojrCQBZusJHLm{PJh2-V~KUz~=^|ZVK5L5r})E z?^g|-1qLSY_2z%Z4Bg`vH%UiX>}?YnUGClz@)@1jRSsPu{Y3!p_1kTpsExjd z`aK;FMIuf+p{Ikex;|CIFW<4q%9DG(yuskyN~6_Een_$usxEWq8AB*nL`R*Yay!>i z46JZ45$%8LysYD6u$}VgT#55@YmpB+?wcxd&sMg^+~5W+5|n3b@17DDPFGs$ZYn9l zNcYxBLJ;%r=yPe^Y+F0kLC6R>`23#W5E`Qjv*Kcr!?p9KdHXSMT0xVRnh{Cy*T{^8 zr%eNQK_m5340idFJ`wi~j;e88AKY36#4$VsKcatYHHe*NI)QJ3Ge~}YSWW^#$f7#u zj3|xIY(L|YY?yjB+z6^xdLAzl8MN4NO)|E#<_xHPrNvu#wtrYlT=r!A!nwMf|!_g;(trXS6}16-YCmVs1jx3^xfP62))h(DnmJX(0r^7oM%d zc{6{KSI`Rb$!KB9Z#?P>e8a_`LCWBVM_Pr$bGOok;c-(F5@6qh5-<`y%Fk!Ara>73 zJ2&zUmhByFnvjE7aYa^FP7-u0fxSNUm8^Q828Hq`i;E;dN|(WkW_J<`78>auL|>YZ z;DXFlJrYqttX^l#Z8vFHaTb{eHbR-Lc|L!BwoDrXUJC@mdnqjFQD&`AB~o zvPRslHC2s*L_qbH98U&$!9G*@QFt(l3A7wV-AU$IdPoP(=aT29p9jU-ufe@eBVCxd z#MaPIU_?SaigWtyI+rJG{(#Y^CHiCH>^@i0`(H>*_^E4$pJZ#Z8*e@~p;W{l!VlSM z1IL~wGr9=?H#Ph!y4XH3*pl$%amIhQnh6caQ9$AmCl1F~{`fkNoeg>B44+DrRelkP z-V$X&@4d65^U!-c`sn>O*~JUXjLg^=WRMwOx7L$r7^+XOyFH{N#ECBgG#a_>GVDh9 z3}i}TJF$l(aqhyEQv_CB}FigxgT&QpaURp6b?XIq|Bnn!NyApq^eUFk8 z3|r@#Rr@`V)ARO_SDCjt-;v^#8S5^A{N-OOOh5RNzIFjFNIZQFXjo~}Q)V4`r zf~2W(A_szaN{hdf7gupqnrz}T*u!KF&J(e1BgbCUlJCq`E(b*pFIgcD=hMGy9uXgs zz_&OAc#$_kFyzMmi)>7P6+M5u#s*$_-0#5&r*?m9sWdU01R2O-^~|JVVzImA;%oIy5albGg>ANqeiM0R4CK_*NN#U_tmcSu(zT)Dgk;7C89hPbq;U z5!n!|`_Y|;HmnS#zc(M#gsU|D#go86?!yqQy~d%9ot@9(55$;9UfzE>RBZ&n;AMBK zAXChXESvP|xLV$3tdsQHDaZ!jvfE}#=xoGq=qNOjF67Y>hp$vkFqy(3Pfot^-_XF# zYR|wP4^hT^;gz$vza{Bh9g!GV#^k?!+C2^aaW<3L{Dc;pXwqg4GV^Xxc$zXk5r8Xdjb8NB<-tqm ziyg@&YLdQzRea(V8{BCV@CAA)ON8G%s*I;FxY|y8bYZik09Jp{hPO)5{$gVsUq0Rd z4%Slp1|Hm0v)NoYj~E&lzN3jWSadO1zDfr@fDP{Olt>Ri$jEUb&IK-p#oGyS4pdmN zjOB3cRor0mzO6lu*_ZltJFgo#SV$zCx;2fy`!kA4(I%_05d$?b`k+F4jb{I0Yn1%7{$p@ z(_AbmA0-f*fVaACFAZ~ro-fC#Xs#lgI@E1CZcb!k4bz+S=vCkQjE-Q--_?Tc>qQ6j zkSD{QW2}*;X1*S2`u985971SScwBAV5UYJtbH+2BuXBG_Z@TU`zvWZS^jyl|lYsFv zjh9RuUjd-|72uxNONleJL770sbpzc_OrFXxFSMnvp1@GFixmR!=W9nJOW|pFe8H-vKJ`&8+xTC^2zo9agr@$v2g1 zAswOvFGSKEx1}eTH0a3!bx+Ba$r5mkl4 z-kQ|T1EXXO1i@~qSVM3U0zqBD+%$71AfP0Y1LNko{$Y9HBfc#g>`TJJw92yJT027! 
zi7Q)@AzxfKYV}cxPt#-Yg<-&U$6qYqnO#_cm-TMHg`~6y+N5ebnV<4yECpTBRvv$& z2L0f9oUlL`PQVtb-X4=NkxfE4eK!XWa2(iZ+F-|Nx^^5QuX<&{KLvTSd zILdF%6pVdR>_r1oZ@U_y6)Yo&8pv2-L+Shysyd)(`cQr1`rOZlTVy>M5#~Om0x~}D zYwSEV!ZQQC%TUzq?1uncG)QMn-j{hzo6Mk=OsGC`D9qm8_RZ`OoZ`S@ySt?%n!i4of@b^!!3c!3;0Gpo0ulTVFmdnDgE4%X>fx+5Er3rq z;}34~9%<0a#f^S4F|W(D*FY}hyNqQfPU%8r5#3`8lHRzA4D52)9SiDw2bK?yXOpV& zLnm2N!keiW!8~hDghqG$#J7I{egSwp8Y+9_+lX^piNkOV;Nbm23RxygWZECt9AB95lfo9$E63Dle3#>M0y9$O2zYs+&uPyqIZABgG6HA>%(n7 zP^3>rx9vglGCa;O%V$1?Kd4#+t8aS9+lPb0Yw%2cE`rvk|cn-E%(}21YTOmodueX9;Yt_(JJCBoh#H;QUUM^ zLZGzTv&!xmkYnu3idjsnXOMAEDD;x^;>hS znnU&}@dpT#fP$DJKtRMw@8 zCkIk+)+VVzeCls8Xy@oodII7>^fS$AQ9G5CNFM#90fu{{d4TtMTHZ@4YgJS=cvk&^ zu5}(yNg*yBE&_k)H+MTkGlCh>)p9oIhEGI(Td2&$UFW?=&-mgIPS9W6gMoglS$-2_ z9-5aD^QXRXdJgq(n3vj9xM&SEaK>TZ1CNNNYLnuJc;cf~k^AH{_4<$(`vrrmRzP&ZphCeG>sq!_#oTOA` z^jB!#^h`DG8)7;tszO_CyFw~r{)`3q$P@V8{`h~vbM72kYEX7-TiLP6;@!jF$B>Pd z4S=D`MV{`Yc06AUZ__!+2Vp&(HYjy*epC@AQxfW;uUv6Mhz`p;I|w9W!~UgA$82i{t zdQTrJv&`!CNL~-%7Jpi&$87Q=c8Z z=fBb}SjgX*5~yc$Br-K0V4GV9>AtI0fP@;C5T*OXt5G=At=3$;)tF1st*`gMT^0<6S$r4`FXYMx)Z1X*j)x zHkk>ZdZxl_!K|92p-e1U+|*Rdob{}E0KQl8A2nlNzTu3a9JxG3QoD| zo^soKIb8_KfyP$W1t3ld<15+iXie-m#lHzX$g)}|&2QI4RA=8xxru)PFqE#gGiDkk zBs0mVy6N^9*-KRGC{fI^1WjGn^P9!DBU!?pB%Bi>Mn?%j? zCZlPZrM5;^MQEk6&l%#`$+6t`xtF5AsFkgYc422pLLN7`-$_KT*V<5qcv5|zyiYv5 zVWcJt_IOn|iY);|@l$_U&LJ!k4t#08-fdVR9WgHc!A4M;EQ6@p1pM34X`Au*<3Xtl z(V#*W5KFIUizooL;KY0n*gNjaemisTG-KQSo_g!dp2R+J$Qc+}Pr4h9HLOdGwww1o zbup<(WM4P|W!u?=UVz>!d3}o}@T{vD-|<{sr5+ciJ3k{03LbxqYC0%>wvhOq8}bgx zd73I%^n8fSOnnjCdkWA2{8T(i8S?X$+HRRHoQQJ=`AN5Vf)1mc;czo(=IO_xmx_6C ztLK{#HB|@#UuKax#i74lm*h#{GlM=e*?}geFW{K%0$~JzBVT-Tc+L`Id^7*O2F!b7 zC#%_R`V=AMig?C06)4UWOVl;t3t z6=+AGS6D*{wtEN3o!}MAC(X@ItU5K7CMLHS4^!4*{)1yThZ`yR z%-kf}Ez{=`9}(}3%ENpMGWOe95!!T4`5qkTFvcf=5B4-7?h{8^tdM(vg=aL^LL(qS zQ7UOvaUqNP^^7lu!=(R`J8ZdEw32d%Q^2ZyxTS zp;=u%t?n=bbED!IbDE>v%IZ?9VA1dSO)VIho}6KG>eB%$PYvSgVXAux)tLDPLWNv8 z5}}hVO%#Eerq!=5K=ciJ6PU0Q&X zso{^`57Dv*vn@j%vt#FKR@qD+4hIi?HiT!{y%>Xaj+l{B)X8Q+U)+=1w&Z^}NMimF zCBei5NZ!GoJ!+}?0V9df2fuR%pdUHPp0W(?f;5ee<8C-~I~sWb_fk2^Rko+Vg}>Wg zwQ{*=!ivs^s$=9`I_0MDaK7|@~{H>(UM*!w10^1DPut}dZy8`GYn?Ahub-e+O;de ztuDlKIq7V)&*W(to5OMdjXo#H;DT~k z)sjAuo;UFG6eP?9Fvj%`n}ss9;_~8@Nq>E@?XxI92((|7Z*bra@haGnGe=?UpJYAU znEdTNXwfmNX?rWEsvi<*^0-mW z8O-vfz9hFq24anZ>KG(f6og~_dwO4@<}N%~T~?lC`69Fy-u?TD;tX>D#+`@)SPwC4 zzalgpo$M%;ZHQUh@nApmw;oVW;1INusr&^kN?)K8gMvat7Sey1X8*{p??ivtQ^zXV zpGDnj>zMs1mZp&8@J!H6{`XtT=J2C_Dy6|6xJ5&MObg)-mE}I;(u4SWZ6+eJw^&5W zVsCi}cga6TffPWGI#J6-U8xGD7$}Hc_k*m?V?hrH(^!j;;@c{^5}b)wRrkuOwFp+C zGCP>z05wg`SrLEHXrGmX{chr&=TCIxu$t@ZUxhUKFm_bqvSz^C(;;Smg`lmp#X+D7 zMovsB3$L`CU9+M~g_0CTrGM(;(Z-jSNfjIU(LFKIwEtanFqlS_*&bA(pO}nS}B(%Rc6OkBjy2 zT#>{Nye@l$#Dayb{kU?ff)HN3{;`>Ld5A9Ed;A&<985d?u?NK#!SV9Sq0}vz5Jn7+ zvZbp*#9va`4R*@B5Htbf39jMv%jom1vbSO|uwIayKfyz!aJ@0gReGo&v)rc0E_l7g z=RQvj#yEdTU1N;ZbPPKj@PGIsbOT&e6MBo>)r%t6N1~|xLUnjWZwaLg&bn}cn{=42 zUxTVBumqwC!-VveJcF8!Y8Kn?Zk)&vcq#*lWwH`omhy3l8PoC;jE- zMXOvU4E-1e<-xQVO?`*T=^swNA4^%}M-aLBu%D41g9A|0i^pq0bQUpDmRJ>oKVb`N z*fMKd5W2}EcTdJ;%tvuaBe=_u`ogTP-^_F_FLTRyWaIIA4thc(x!sKlVE1XVjCe0YP`#@%Ft?M9pFFsvVXL@Cx&?S#qj1a8E)yhuikSf^y6e)Zs90S2 z@R!g|Hv3^Fd4v)HX?ylO^Mtk|K%`ZuzP5k74a*mEKwd+7rtkau@U#J|+XcFwd;CNQOmp0IX-z(@Wkb|xDQ*)sKMuO;5 z6e0vE&If^luaOek>B0~7Zk_1RT8d}Sl&41*Bc8cYc6!V*-{v4588;PNLL|wBw1Nh5Bv*ubd3mSBc$+2*n9xiM#LOoeqDuKy zSj3UxtmGd3i0F1_%i=8@^HFs(HI1(_&o&9de3R3Xt|xm2u*lSpJyyXd*V3d(g)}*e ze`}$Sm<1$AsBX}jGLkuBvHLSnENNlvoOPm$+%HdINReR(HyD4WU~!e1 zGhTJdqah;MnKQXU8TRRtAo;1(#&kwfzzfL$j45J4U^^{NUNQzU9J?7maA&ISxL5>z 
zgt*Hp<*R;-HVOCt{LjDst)90rJO1O}@42p#fB)Bip4;#rWp-r$RnOA_{#pM!&-RMI z{x`Ac#GF%0%k5ek29hH;qxC$V<$QZK`>GyZod8h^IN&1ubw%p-{zhLQR| zXPne93J4_|2%Cs!!VeU{7AZ%%CEHfWpZll%f6jMO?zGHlB=!u$j(`3bKKXykkyI@CSZe1q z5*SA6gg>M5k5fmLn8L~=7^)bCN$0=z6@+0Jp){G%vWGoK%$8x;Uiz!w{dGkFZt)B= z1a2$&?|N8HBP2bU+!go__JIvUa;L*+I7~2qOHkLlhNw9yyj2uCmq1M{XY!2Vg zar{qToE8)Hc>Bp^xpir1tlSmzLFWsxXScZGs$mxdOio|wU`6pmO3lu-Gf-9yG$_Le zA_5v@s@57r+^SoX)rq4TVL$|*yr=k^Lf4S(w6v#Hr&DM+MaNnGesy>}NSb06;JF!; za6g|L>%%cU1qgp>;)fh}4~bN8msIgfiud&VIPlPn{pL#^69c?9u4)5Y7esaza~ETP?o$9Fg%|K$uzg z^|4*@Y7)>>Ji)=4k8B32a}zAZ=`<9mi{^1qA0^sn$Y*~W!?2JrqqgeWv1L)mQk)v+ zp3G3Ba*Nl0i|dpnThcnp)L)EKNfT-*XX^!XfnM$ax`Skej03&U-i?~-b0s1dD-PMX}6fD$zNn~p0R6zMWDo+|Ul+!4J zb!=rjCq7*B855*>!BN93ow+4dyhryl)sMaRP^E-TW_^9g?#62qSL8{ggG=J>^}$UT z)^M27wAZNKcMj5VUzzU@;ia~Mge?mh-sx_V?xKH$sy2!)Ghw6C7@2KN{2YHbJ-UA1 ziA9t0mN|(ON}4*p2dux2+3B>ol3D)2l0S}#Z7j+CDk+RsCaA#@Yo*>!@BC234DkC6 z$s9>WL3><9r7xef;%@T#S-akKDW}RnBkY~P0b^R3e*47%d7S75n$PO9r0k~h>ZJ27 z9dm!eb6}J~1%)Ie#f@+L+`!hvs!5zO4tUm#mMOU9!y+~`pAXLl@RW>cw%rciDLsge z&Q=sHqlUyMqhCNS;vPadq}a#8M??WxIbi#2WEcammD7j~6UG~De_fd!KWr3T9J>qZ-h;MM7T3{KN4Zh&ty1HVz#9WoE4)S1o%eU@@vY~%c z6QRMhz`oiB18pB3YCxNxe_t8ofTHi$S+U-H-$@fZ5PVfS$E7gx&q$!v&llPv$O9c2 zAH8d#@082G7&?)-9$zvmD;7z|T#!7}oo0-T7rvuGWg!Y@6itX zN4&Nh?r0DF#_*>`OXY+=m;72ag_`($kyz8+PGWqE(hhiDDQ z9{*C$qm(Zym5S|utrTJCw~=6uS1MQ4!_3iQ$;31WU_Crt<$>GY$&e^I6IYEpGSg2O zqXHaPCdp?7yg|F-eZ6d5Tn{X3zE{{1`6X0?FW7bubL9zGhLBPAIEVRcx=k)|8VwPci1!&Se^29y*>Gv`}`8*-G9SH|UCKXP#R^ zqH>7w!U%0pc@qu-=s}GV4blY2fs$owwQCTpo}sWAYWz?Y(`o>2`Za&RJ{Xf;<4^P+ z-V(l|NfF~TRa_8$W93-u`&tUi%0HXYq8SUQ!OEFk;`?`I(){aO@J%XR(R`z3RTG~I z8M-PKi%F|8#>la|1|+FRLy96E)jeqw^8LmJCZmww-i8tkT#$LEL!vhxDmNK=D94g_ zvlGbxaFzvxyBq=P07pQ$zdjsTUh0aDnVb-)zu%}r ziG@$PF0qX(+8W%?Le%IMgdYPZeW*N16DC!wG8DP5SgD^AiW8;R?(xQd6SOMi&tj?- zGRI@CSvXS4Tig}Wf|@c}jjx6+EIF05Rp8nHXvEPCiE6$^cbFMOn2^>A-z4)tKU}Gm zfji;BHZZ>%e728hi#E!L`eu z0@ORz1=_wT_%);U@J(!gBNRd9m)?J=lj|)eNkI`LTW2{x!c>2BicFQ*bh>+$ql#tW zq^8*&OceyW9EYYwzigGwM}hkz2S;gwD>V%A>&|zzPkCKq{E-^n;`!c5Ztyo7lG~T3 z#y;8DEzX~)P?{fHcZ7?aa51FXhPDK+b7Vs*xt3KfA+XIet%94r8cZ_WBRYiO;^9(q?=dgE8CQ_na2ZBuNMrbN7!MaZJrAExf6F%oAq1L|QF$$c z5kSgx08~u6A91dKnuv}xZW_7hu@7to{ z^eTFDnNK1ZNB;qd2O935#7t92E@^xu4l`?x-SwaO4)?thx$ufh!MU?v0W?{(SPnCX zssk~145~i#+3P}tj-MIki|vG+*ZYHodk~`#!W>$DQ-~nTs_M?p0C$;)b*}P>_o)_k zLJd)fZaAfX)WC7tm|(e)%w_3$!6-KGka8>$HHC8d^l}Ixj3(hx6oZ#C;P5K<`lFPm z9A34feu{y|MApmatOGZC#%a5C4r)D(`?KgX-*X>d6frUZCSc{9zhBFxH$e-n&0am= zhZy0v_3*gY;II18U_Cta)>^t7n4liTSFSp>M}>NSB}=o!%qH)5NFQBy3N+E^oRE{c z$hF+>6t1vWz!wC+2AWUF`}%7==LObm7dS5c$TFHFgaAcgNm|o5Vqc)NQ^XN4VK*5ARA;lU@;1WckT0~mr7q=JLDBh6xy z{2PIPD$1NFlc-#hdbG9MI3!1J6Y!dBT6hu!A96LP*2FS?4ul$)L~o2Q4$c8&i0>o7 z%_VNSOQ-KjSn5Z$^1$eNh@+U6wSui*$^h579Di6mBZC!5Ey3xgxq;^h+?EC}QY5;5 zGIBY6JBd`j<;G0#Q5D%Em%^GzjkMLh^?xVm4r`oNPYCCfEI4wbtor)ld71lR|oAa|~>?$$u;{=w%T zjHv=_x-a|;=#$R}6@s!LG6L@2oBcJ{D&RW5JV-7G9>`$^n>`=30|lTZ%ewHDC@)02 z?k(r{y2Z(8)R!i~O_wYqb=r$xx^hB)mR<8Vb8T=|Vo|PO-IncV1V{jn-bU~IJu9Xf z1^0U6SxA2n`#zvS$A$#g9Y}_hvFPWd_j%f%x}{2W+fvnbSi1sHW!uKAXQDs2E*>E$ zcyq*0=N}yeeLJ^;X>{zuZvwu(me_x+-Zbp0$tiK@3_sx~LTmJ|a*PF6@md>yLFewy z;n5D-gFF~GYILF3f@fmqqu7r0X=JdP1Bw?x#CUSXF`l=D%QmTNr+8iL!vQrU^*{)n zD~3<%vY<#aGQC^ErBoKAS*daXw9Y}Qg?vGOV}J%t5|K$o3601_E#9f?;x~DAag}wX z0^lHC=ce@VuwoL*iRgKwD&;S;Wc4!t&FnZSlgNXz=|)7Q>hB<@ImFJQ+uxhv^B z-A)H;gs`54{l@-`!ec0M^10uD%6gR zq%$`n#`vWU1RrykvfG!^FjEWtL&?tc=b65+B$;mRtBbRHzf!|&d*bq$gK(GV=(OxF zELOT6AmTa?0>$ewqhrv2o`4t+sT81k^kFDh>e+U+;eOQ>bl$?)AniYGWYK%r0aR_R zcnP7b`Rh9`v^v`xsgM4gexpCOq#OmXA^;Q>k;1P0@NQ($fq4t&rXjw(x}Y{h26t>Z 
z27^PdK-T9d9J(lnUW8!;u@l50V%6h9s)e-C4|!3;(2@+Z8Wk9S7ranJW`L?-`MHre z9Nh2qDSp@mya0H6)UtN*CTFroE*yNjm2j1Rkp^HH13W#($0I5= z+~$Kz$>S!}PVz~A!7D_HzZ+3Bn(zWoG$x1_`r*a&G_^~#2@r?I#@ z$yIZw2WQ=&qm4NLMLlk8pS7MdE9XXHg81D19{I#J^#7WzM)Rx%yOcA zIAa&I+4Nk)3E_=>?D4ZeOI?9{)WP&&6&~Yh+`cKF7}85Dg_WSf@^D6J0qz+G2A@m= zy@#ul2d&S4ib=dJr*3Sv!RfiudgjTQDB4@GD7CH#nYNCd9Yl{J!pQU3Zx;z-%xF=* zA;fz!HVhh$-ueo>q2gg+ziiLT9>GPj0edHXXM)C~d+JPwl88una~s|QwNUSJ^+DArcL=e-Ea-?kxXy0KvSH3G+#Fvg%Pw?`fnf2rms5{xTEm{a6(mz&VBp zjD0&C9CX-V(VMcy@51&^^Cdax4Z^)} z)t&K9%A$wiYMku*pn2^S0KSsu$#?nhK@MCv$Y=M<0{Y!o8JGJqh*thmsZ(yV&f~^(kWI#obcafZSxSM$TG+mf;vFIWW=#_q=FldE02UOO zZDd#%#Z&6<`2)QuN|sK8_5}&Iqj>0lpNg`4xYDi^Y;-{Qg9z7>=O1@KWec4u_bw~P znEXs1-seBui$@seTQQ?0=Bs_iC?kJ=S*mbn!J}XN&A2s9+F@+$VnlXI2`~dAYphc>19>)laF_QgK(c| zLgf4fCAZM_hAf+DD}gK95XxfKQ$1%pE0|OQ>`w?>|1YdM8w4v5-@6pz>Kp8f?UZ#O zUe|MMMC>2;Fe9(+swe;#EDMb`lTb>9P1`r1n*#4qXR%%M#{zyo!bV&u0u_G1M|1&4 zm1pRY01jPFJ5_Kbg`b~)Hk`njq4%nE}c1Ml)3^)y$#6&F9y%S^&P8Ol# zmODE#Gpf4zVU-khy4cPf4iO`O0NrvkKPb}?YqH0Equ@_**yo(6ycqiz`r`zy!eh}% zd-e191}3~$)->mt`mtu9|G0W#x=c;dj{WjirxuTtBjLIfez>;bO5tyRK$!iE8PH`I zNI&isxHna$hk!xJd(_UV+U_pvSm5Fcpafc(qVn4IWb@(=9iw*rNo+ny9_KC<>6G!W zgldx1t<%^i>9y)BSi^)hkr~BJ<&P1Pl3m{X!8ibRYp2jRMBa@hx^aF>EGYvh6Z%M3 znC*`%SC<*~%Ps@BwQqfY2$#01YQ_x2-g^uJnQHKKu#ru18|%bII-1#Zb5V1*&T-&3 zce=82h(*d6cSv8@bMtQ{$?P1jOKfa*qEYU$q5R5<+%N zI*D9=NY={1&o{x!=ZRD)#5lnIb{G^VoEgRV^GoDuR|L?UqV+qz#8$f2X`^#U^z~vA zx18dCIXa76%b^&EE+~UA4a3ZwS@4CKSKs=*g=^{Ba=A`5Xl>pE4z!V>7~f4H?#==I z58ED;*V4RQwp#gr6%(86!P6C6%-&!e>_t_iSxfbbV57S7ysIStOB*%q`fjSuPo!Y^*3xz z3Dy^^vGOLoZSQeX%^7iwIR{Rj&TABp7^z$97I+Kvo z_ov!0<5kvwyF9MwvhUKb2Vil$B6c#jgBvHOm@AR^ox5^@WbSw$oF>Gw%Y{-nO_L;r z#9rL^v}>U>XA+_hr#6lPlCSwwI&tS6m<>tdv?JuGpXb_q@W)d&x2Jsj?d`aIqu#WR z#D2+FjE5Zv45Pf{Lbo{*y_sT8^Dqt8`y=&6yk``Dc_i+qaVJ1#gZKdT>tCnom*u;= zTj`atb=+WU!>b5EBlS*`y=+@}6=2f*pqs$^PoSYOAtX$yF7#J((RYGw{a(saFlToP zEcFW?XnX99G-i;6Of@<4gT%iSgevpP zH{L-xIU!-b46sq<$aCE@fv>-aUrw}H{YQ=R@-B!pxy`Dy!>O-%`aSz304YHgC#)PPUQmyTx194$>g=kLf?Gc#0E2hAX&D( zEg>k^K|Dm(7x-rDH(a$)E*v_&TR$y-qrqYB!TjSEIz2$B^LT3c=%=uQH9g1!BkH%^ z^+n+>R-fBHJ$PHO;S)gAN26Ke#?&hUgkNaJ6%IPOp3Youk=Z)tBljQ_f@L09jO^ena`g8VY=Dm0?8G_I553yjdT0?gGnjy5? 
z@fl&z3-xeumSTTNOm~w4`7$|w)gH5rP0>s7$5siSO_b9zag{u9VezBZBh5tS%u8jf z^qwGS&O9(m!b`DE3L{-t{C;v{JEl*T7{iqGqN=UJN(4A$`IIG zOqn?Cz2~ee+xu2G5Lgp<%m#^~gC4+(YydDJ!u<^;3*wArIa&Gyd5 z5AYA3@+wj@Cmrv?{7o<8q4h1(7S}x+1b%btJ%D6e)RUF?2X7R@qg7rXK^g4;fRR6+ ztigDRO_bo!slcIAIB8av3QbS1j8?O+hebKm51jCwRZaoBy$h~?Zp1uw^LKXRp3{^u zcJ;*OzT_8&yz7ulMOKQ!t*3Y5Y3EN^3R*T4OEj&Gx^v$=VBoQS9=J4t_XKbqt^z1V zYUz=T+;>ktPNzg|uIaMr5h`0Ip&cTRj|TO=9mrlDXYB5GtrdZoW&u)C zvcbrr5IWn;wvM%b1d1$M+i~n$wsOCKv}3FBR#(f9jj)P8H{DJ3R$qy%xO?!r%JXRR zTAK=Q>Cfv%RGTK|9g)45o8-DKqT_udB5;Gw&@PU(Toz|X6;(5nBn!bBv!6o2rS`kx zSH*^Ac z^#Z4Ln2A6%j?49n7STol_yhQfOMG;S&ymBE^>*x?0PB-0dE}6q^|rLr{D?2gCLNJN z971!?m}!T93$IgZc*T--JuR}5GT_Q0sfj;L(X?3=9Xzyp^1V4nA6kjgCw|_A^gwQ~ z!B~?X00QZ#bxaJ-L%F-c%JNFc08nFHO@D4!UZ0G(eyX3T*hffoDdekY{Kn?@K3-Xt zH;8I00IwNLb+9KXmc;qm4nc_;78u|M>KtZ2djh?G;s_Q8ulKPC9^-L_eAG_vg!jtc z_!iyOSxi~dZ=XHW9$D~ASS8lmZ$zhQqa=K?JEZ1!M2f|bCkJ|NyXGcK-Y%>>zoq3t``f&W+2B`AYKpXFv+i8?Rdzy^ z(c;*7cf}6+OvEFozUmMsvSFD&TVp+B#-+S}gt?YOrLKq;YPxC?#g8bW}>7C zvBxX|A@3}=w=l`!moNdmlZ5sSca$AbMeQHIoQOHm$mWJ|)%0DKf8kq)i2Y>8%3GMP zhZs9Ab!o^!$Jf@=Y7!>&&DZ=+59b~37ZN@N1ZvFBT?Q<`t}(9+!%G}Oz7VSQn@tLT z3cLF|fa7+7ICZMzj(0Nb@2)1tq?EyB3nfRBWL6CQ5mESBQKs`T8^)AkD_|Li$uXAa zoW(S} z&>D4f7N;>>y3-11ah(*1=+Ykm9~l0B)7^5(aB+<&Yu?{?@_j2Fr)di*>zA-?l%z|Z zk5e>-X}g8QhF9Yy!n518xx1H9UNbD`IhgXEMW}duz$C1#+LtHg>Vwusqyx+B*=U3{ zF@@$ucMypT>F6aa+EZ$YH3nI1KLJuzinS_A@pV^vz}=q zJXHG@;jf}wDwOXvl5HLE?RU6ntObF~6n^u{U1fscclXn;H<03pW~7~e{%GjbP

S zfU4q!?A8*aLkX;#gOcIFS1uj0f(oC6I_aSssB6vs>wfeYSH(aj`L?TW@jX4U~`7DDGw=bR>2d6`hb^`|zr^}JlSpsDv z74%p5-~Ih$YJy5+*-PbrGe?AlsVRnKsvKrhRmUv%Z*&!qP@#p@WLl_Ha&4W{XggzkewMz(DCVg!*?+@?IHou4$j?YQ;<1QTN> ztwCm9W9KB{RAsAw#BUNDk2_+%CNML-sO_LuPF67_WHYAv02``TY-#-#F+q47G0h{MAGJgJ*a>YKeZPf&(T2B+YGOG%Sg7l=6N>bMd6}iiJ9k zaleC=(Qp^PNW;VBGGa%r*+;~<`EGtcfcy~Gw_XLS>N4wfY4z11A}Ou9o3eT975Hh{ z0yq8;CZT12(L_iZzft)TF=eug)P`tBaM{=whiWdmKFd=hZ~%@b020$>D$-@h}(9=zk*yHErb?nj~3rj zGUfNvVz=NjFMixQ-%Z*yiIT&|Q;SJ<|Ggu-SJCNzd!@la(}67_One8<<-VS$gE@=L z6s*9p^_|oi^Fg2#uZI`SvOK#L>4`%RUde-8K5M+6^V9f~t0BbV`0oXzb=-|4W{hV$G-L6v>2wV2EXq^Qw~_H`JjOnE`vS`DF2bGuD5 zu$i#g*0xF!UGy*3v0sv$P%yD0aVPGmF)QRX-Sr zsPx~p6Wqf_If-1yF$Fg)JpJg@Varb>b8zWD`*=Fl2LoR8Id34jv`in zsMXM7R!qq(*&kC*|E5&~Q92Q7)95TUO6D&EA-1C|{if2zkHJ&-ZqI=O9zAMoHsprv z4lpOskM+BU@-D4B*)n!UhHPY4kBNS(@Cf-q0gRsXlWS~-?0#Rmun^~i)Eo*j6Lo6iXj z2vCLng%L)a}q;RmQSlD3!(A*)(HbDgk?{WXXfd!?W^h)1a3$phbT% zkGL358d?WQ%c7WD9>jKaktk`mPU-+!x!uEt)rqbzwH^BYs&z>I?T? z$12lpG^$6tQ5&|P{Xi+9CtS&YbDAqAao&uzSJ#5@pg)RdK}z8yX145BOa(-z5b{?U zd20weOKS^*!XUf*N~%}VuMqs3VPWWwg!y(Mv0o0;36B@TK7i)|?+nedUhqc{_H29# ztxFgGoo26MmLUb*WuxWZtL#eRY&8lIOMVmAFDueh7&a=0jzSVNx4qea;TU&YK%GdF ziIpL7ip9|(BeSfsS=_hkyL%f=(>&2YQ=iwJzwhreM{DKKVq}7oAP| zqcLL7-BjN6>q_UKS_U@oDXG5aFM?{{=VmP+&C*}JT=B@+TptyGt!3&R+-~;p;zMn_ zebQg-)iKN>1uofH3pwP8sN6YDAFy-o0evtxe1sL#p0zSxuVKWwJL-OF4Jtg!$u*0Q ziC*m`Bi8?Z_&Zs4`#a&3w>bFRina6M{IEf#trFoBUCfJdX_qrAsF7Gk?}-1yN3F>3 z>idW;NDP-*aKcA_rt1^ z7W&}l?mqRt)usg!x1u+d&a$3fR7%Ea1QgaFTEoL*eH!ziBnVCG*r`zU7Van(mK{5q z$;?BY$l)>xs!kT`!YqvL1@p9Jnf3wh!T=hz>W)40xvWa-dvM`XJHQU~$A^Tg!CESD%KSa}5gxVs z$I>r+T*pJ}QFFF_>N?bic5f3vSCw3fb>9^+TgNes?(s7})P`=o$Ns9K2j;Jir+FM8 zS}J~-Eks9uLY{mUN$0%ca~?g%#Dx4!R3E?V6;8k{_&zx?P``=~Ksm;e&CO#7>8+rv zz?oU35a8;ZTQN6&%;f#Gz~U*)Jj!+BIY32zj8)05x_fY`>lrNiWGep*K6tb-EOP#h z6@2v8&*O$_5m{{sc2V+dmp=8OFdI4c7ULH<>mq-D3cPhpr|B}Embu!wd!)+EW3LAR zyWm*1Gc?*h}v)6kX4K3v8(72ktA4Z8UA-Ta52PEWR=oq8EKgY>V0GI5~~C< zl}(=YWW|HB)hefvfRE1_XV&}V0qL#)gRnqxs3We(hrQ1E)4wz!oy16s zsLil{8}%!oTd65;1H9=KPjka0yfZ1W0>@#AUT`TzMMjYM;?Fv;a zL_A-0%w~UQOQ9{cxf)7n4{uX9fywj@ui+H^Nt{YKM3zC^-y3adzoKtvtgf4rAo^v9o3TG#yWhDL^H^L;hVxUQw19A zRgMYTL`1?QUSAd0_UI($e4}heXGji2wIje7K`H^Pnql2+366wID2=Dn$nY%O-+UAf zY9z0~$CLIT{5)7ki^Xifm23Ix&C;&T!=I*xvSKua{tj*!ep@zeOxEhzic#uII>fge zzUandS!O0)898uhIk44#I}0hf{k_TeR413(f?Hzonx&t%sPo0Gl&d(9s-K~J30WzT(AsJLRF^tASOdv zU1xF?Iu^T4mI&+4$?6Z)Y;I8RryBD-tfs!N&jsG0hfA#m4As1U_ib9^<+XhpzvU4=Z{6oPil`lZXi+T_U8W@i>Cyd@ex6RtoRF4NRNlZ}<`c znI|tC!Px^;N{mY4Rd{p$4Y4540NMXyPO|)sil#;#_tP+eGx=Yp^5(9UQnfA4I6t4hDjejKf1`Yt z)F`>)tdS~zuy*vrKIqy^HhyY%0rlF|{JIg!yG>DlJq3umxtqkL_o8<_==#^#31V&b|NZmHR>(#dJf%g znR0)BX?VluCtW#UG>TpF)HJ-hKENRw0szwUTUm{(P*sNl(`LGd(yR*J`_j;4ZegLgLI$L)J`c%6MUMCi#2#V8q1r^plKf_E8PDS&K!y7~3_g zQqJh!ILDhIOnwab!#g8&d9lG)gcIOj60*d9qXKe`xi}Rwrw(8l#Z!W(lTWrL(8Aa_ zJNqTGab5332}8WE|UKPLw6akKuTrOL)YpXg1ob zk#NbVz6YtZsmKekn23bcv$Bp?g*DUtn7gGYa*VaM#{7tM<}{G0*A@P$o54cJgMNnKxul*{yZ*QY}tQyDw-MB6(H4~uip? 
zjuM<~-GyMJu8}Ppe-<8DEQr$ZNf$|{@7{BDNsiI7hyd3=Drb`%x3 zS;C{K>gu17_tGTj`X!h;mmF6#aTIcl(b0Af1=KS4o6-ZiG%_@udeyvtN$6uSv~(`i z7S5XatEsp)unu*?P?r73UtHv^k z%c%RJ4img@t41t+pqp5Feaql|=N(H2{CcZ08GeW5-Ya!~jRse`p=$G+PVEO^ppZG5 zNQT$wdpx=F8!6FmYG`N}??Pv@CDcd)la}txF4h*0<=xbN6<%67U4Fb$v**eHcwZpA zd4y~Ozk_M#9ARF66?C}T%$^s}^G)wH!N4Z28L#F2%TJiYTw{W77*9WtgEo3L@R&=h2yEUo#{ef}n0Gn#v5Pktv-?j5?i- zpuNkiOO@;O2QUQD+Xdyg`Z#JeRK;tdzdqP}NfS)K$+%^Iu8+(Jj5G7+QPUo^xeecM zz$fctXeqD)B7ueqJ#cFb_eT0^d9I>9&vx=i@WA2K}OVv6!{ zSV7##WS`1avU_>Q*A5~t#`z;%fp)Vtn17eE*m0HxJ=5R_a=9 z0N}nDVW*XUd-yQ~{o+v6H~9cQlmsUUHgG`zRkWqYU+Wsh;QKGy|5X8T%qzmb(@e!y zdhO*nR*zh&zKFPA7=1{Z({%=2I(qDvWgC_eQH093Unpy1yu?W0&DAbM3Ja(CZ4n1Y zuZwJYj0sp=f-A$8P0cTmyAGDw1PIO9XAuSnB(=hSg^QlXvUnxkGjZ^Z4csx`fMzaE9Y+{=N$g0*{H4Qi-^TOVgT+0^j<0pgk9qnTN*)~E;jLp9gZmR;LA<+@ zqF$I}vsN?Ha!Nn}8GnIHo)N`s^Pn`DHUz4Fn&WiYpZ8J9)*VIqZp7i{Qt*0#F--bV zJkcj0SfJu*T~mfH_GxFD?EcJs2Uq;0+u7I(l45FM#Rlaj%`a{UTvxjo+&eX>74lju%w7vh$G_%(Qc zZ&&ic^%*}v|016Np<;scvTMWLF~UJFgLIAONLc?1O+_|no2 zIY>F@V)xVvQ<>Ye2UVBRc7GKwm2>TX|G1vL-UfX%&@P)EG*a*Ip^71_-~^q^@#3Ky-zUkx~20? zyIw>mKeLq*pI4gmX6)a>Pz9$1AXCc;ZRA8TDwexT_x@UyE8|W|I8H{KR@57(l=PW~ ztW;?7<)vDg?z;NqcHB5lgD>o|+Zqq@>1YcqnRpHMPnh(U~H0ga0GXO2k9xN^=bLY4_5u%YDhQCOy`k!*i6mGHB! zmWH15SBTOtmSqso@1R_NJx*$LJCU6ueq8)uP&uy`S@(l_%?Pm>VaR%$`JPU_aC?H` zgK}Oh6>HKpij$KoYf*%j%TOI*se?u;&!8+JyPTqd@$KRw3~>o^!OLtJZ7FE;TrJySuzsHkeY5qgAWwO0lEs(I5%v zM1iOqs85Y#Slz*YQ=`X~G<2<$gn`aW?^{_wo)6D;C%ijcaz*EDdW$Uh2?ru(JJr#> zHyTG=qYT`{Twaws?S;}NW)(@rD~la{t88sXNZYgQC!u*bdyg$siKQycf}Vb)-SWXx zypV1=JEhvqMUjvldgOJpv0CdI8ug>-QDI+{lllA9={`4qpNxYJHaq9>=X2_``KDPT zbrrlm+iDLNxj4b|7cRLA7EX9*CWK4D;#HcpKFp)GYWP-#E0lIwfu3^|KmI{eRDq=L zTj2FYK7gBf{WvJw)ZomdWZ8F%2G^>twZnJ1RxUFs)Qw4?)kjB;)yzZl?n;9@j&G%P zscfEJ9Ch@6i#q*#p}z@CMnAjcIxL0vQ2~l1LRYz_kc#dHUp`HX$#i2DJ`};Pb;(rW zuJFKYd8AVzOwOV$DT8=%am%jSx`3Oj+_)2-kN7R!FJ*DJUfHSA#~Ui8V1n^)*2%D! zO36TS!ZTMEZW*H`y*7`a<|Ny8YR}j&H_w&g!Y2cNo%&m?M4h0eJMXfnfHLFz>n)g_ zLd-}dZ-D6+G8sSIl}DSJsg)WY%kK!y@97+iZ;TB#663Kvw)D5+ zzw~v_By{f3U*Pmw)y5hU6hX&T7Q(^rwhS&)%W;69rDztDe$z^P9iGgZNLRQOx3Am^ zcq2f6${QNsCIpEUQ&>zAb0KqASUtRJtP`s#Fv$w`JysK3XKZZg<8%F5x)k@j2QJ5mJm)?In09c+l2=TZy?VKXDQ$$yYkJGiDX-&uxFL$zfig z>cAkP(dOV3_IKibYcFa4<${Tw2FG&iyIQn*wO@ch7%k~6W|m$STfIAxi`$HaHzeL( zcW+*byh6(J@IvTds|+qxT%HdNtc{iG7Uvp9DRf?KY6LhskP|CMPW2zXjR z2SL9ZJQZHuHG``C3EX_U%3fmn&!BjZJ%(57haIi-=xol{m&=8oQ~1#Y$w~KqA`gD^ zq1wu!;F({p5 z=*U+uIGSsE_s~zR4f76(;|2%Jhan7vP=qy38LSdN(g3+%#d=<~_g@0)+*s~^;kP{= zb+@0efSi5k0KoAY`Y*inX^hQZU1ii@<(FYv~_1+&T;^`K6D4Uedq%#oe*`7`~rNb7+9}wAniB_({W$z^8wU*M@!y z#AO;4zGb(E8^bDcZIsIQvQJTCBRpz+8Ym^h+!0*1I*l+C+uF1(^spxGL4lQ}A>4V2 zSqHP6?|4S6Bft_4K})#Vc;lwm{?xcMR!GMn5KxU9M+;eth2T&rV_TemaS#whw{nD2 zpyQl+XPacVj3^ao|Byh`%JZfY?Di&3*;2Zq=D*3YUP=;hyS6H2oq&oZKTrH1{JuQIa<`f@z? 
z2>}uqlM;qbgzz)PM?TSi+NXbxK#CuMz!Y$eZ(7q7IZ9YDK(6wQ%_7J6_x z%?Ylx1#u@xIWT;Tzk!^L@@RnE)_4@DJ2Q*Cyx?qTx8dzeVW*>YIVu%|w7=Qt5mMNq z#PcRTTTqLzn|#hc6K#cgIVD`9eMOt>EmP7*vMB}?*^->-8GjFJiuqQCO3@>gN*tYt z-LX*sR@vqQV98T|I>_)LKTL$u`!ljlgP57O3D-trZs#&RG6>XGDm!v>O85yVDpoPb0yVd zf7i>=`S|rA*~DD`)tp+Ma2#lT`*gFzf_CE;l~5VpPYug?YWtU+nx6!d3&wdpM0bi~ z%5N?3NeDcD%*#^|ajk)77-Rl3wPSn6C{a!B8lLr)=`Wp3l;eUDa zeP3S{-j)a8Lh(ml0$@%k+}RLO7;HoHEb+x5tpUJ)sXR+09lFUlC@`y^HFAXpQ6OFz z(8ub(<)vUJZ5}!MyUf2L1CCI-<|17-B1?}klPpo#oYlH*!yt6`>E@Hftj)bMm zb(5I!-#MHN>b3Ka#eY##D$)Ba=^9b%PgtRdgCd%m{$woYjw0bk;QKcUaqkF8vHHCW zFzTO@`$H_&)Z-*8km)$HyA&4ghn433m;Kv+<#KK?rTvt;)*ca2E>pbFd|8kOjO z7WCW)K)t6)L-(>C)q^g9k&)~fSXJKMtChd~A$s<^?>W@k>zc#lo>}oXGQ+ozreuCr zZ=qLLZ|?pAr3zXv;d^|;n>Xe%h#Mt;p(o*^`2NJa1=5-FD=|YX)bZhUs&M27z)I{$ zYM2wttpZiA6^NT)W~kI)DS~Y1Rs9XLIH5=;eH_u%>5I_B5@d#WxzInJ+*rh0UtP>w zqDymFcQiOne)qOl@%OBvl^}$W>}3TuNK?Z-{W{~I0@VDB5E*R`+6!LC<06##$zgY)Y7VG=;`v~u-d-5%3COLtMEg~zt>$rQK*1}7D;TB`Bq{hcl z8Ent73H)6nU#(XDCarSBQ#^ugSm`xd&HBa}L71YuAR~#QSkO{^=cmp<4@#{~0(@PK zx+GJhx%B={dIs1x71TrRm!*=x%0WvRe|LdAXkU)bDCNA1#GMHx<9Psq?0=u`6K2xw z{Ze`-vF-_;1%Zz)L~2=GzSSCA8NywUASEJ-EgS=o;8G_+Q9Z^}WLP9&ghs+Q4cuqB z@#{AaqUD*E9XV<10_0+JHubV{nF!=!`jC&L@T5UNf-hUBL^cJI^(YAQqc!-3f2r;e z2Z%jz?r$&f7u~-<+g9D+QB^Z8$NqW%2_Rm7MK9;+450(gUbl10i13eo2dV|cms(!W zAmdAoxu5T?=!mi=6(E0QpiT+8k$hTS0NbA6`)o*}Qd)*1&u`j9#u@FOd4Q8l?douX z?C-u%F=TgROHVjYE2S9TQZ`PMf6TN8F5zF-zHnSdZ^+G2^k5C;q6d;M`=(ag8zO|2 z-KX5s68{}L1T(uh2a_N%WZI^mSpfD4^82heEmUI~+3Ikk^rB zSO`9Jbn0OxhmwST=P3~!Ysx+B07hy=o{YtpkZwVveP>W9sbmZkmM6Ox6?v^Bb9gS) zW-LOB4W-PQ)NhA^i7JB$>trr`YqksHN&VvE&_g06g}gC0v)@fPgrzZi*2FY}NKMc} zORw0n38;|HgDIiRFLxBhf170m$U%VImK97B6dvQDUoPkx|A>A8M-vZzAT$t2cH`x_ z;=5%L%p?ucOI@HFHzAI7{5s~1qUkB(Ije%$%1QH5^9QtW{z4)OpM>2&yKhJCJVS$$ zOo{#Z*`Yf8JW**==WTY2XSh znDHuuGJKQ--6dw-Z_~N`;8_Y3S1vnMkY~Ox0L57#hSdJH?9;r0i`|DV^O~+kc)Xv$ zVS7NZXC%<%Z8)!F>&UZl3EGs&8eK0?&S|h`eN!NuYTpYG%lw7 z(un9$W5SMB?glS_h#eTW2tatq$@Yxs=mE`A)-5D?vyx_!}Q)DXX!^ zXgVyPE5PglvnOjX&s(KvzfS>?s7#}ybno|h`}8Bo>UZIJ=s?xVJr&wP6s(~XN*0Ah z1~Wls+tQ>)!fSj;#%4>>*@1L6b>e@JBG?PdQtW}FDohuf z1B-mIS>IKHHQh74tZ;tvyYB$NGiF{65cZXC?_A7+`OXv8e?|fc?Tud+seFT((7_8m z?_nzre*qOb-VZehAEe#)W7(#7832sI<789%ZEI*J9G|Y!k_+XeNMV)fF~=^%mDXs<#Mj4=P2 zKX8QcZFp|$yTXXHCImX&ao9N14{N5ndgMDof7GH)>==B)+UTteUUd9Bn0{`5KkvgM zo`@pNZ`|^0Q=n@FV)Z#a3CR8={h{+_()(B_W04! z_aaakZW++}#+0FYY6xX4;mBpTU=%Gxa0{U0;o*Of3p(0a?ZMiAUFlmh>5+#!mZjiaTcW%OFix> z`9>OjVMZgP%g$*)OGNs#Rhze#Y9eC>MXOVtEGTjd5KUeMKzcz@&xb63t{-eZ@GR5= zkWYAf(3lx!tI zE!3Y8c;^azd(X$4AvNQFZo3s<$7-_CFI8OE@kHtR^aw;<{U&bo?ZpFoU&mUVj>!{p zig-H_l-CjmAYkuJ!B%2mQ-K?l_qB8&4kYP&(jld>!hYm%0udRsAY-d0XC^>oEgpxT zL^)l7(P%<@BIg|Ra1lfv>+qf_e`eD5euZ>jZua|XAjB3GO$_BOCKolI)wh6{dA-hY z|LiKW5yVcsf&GDDkxYHvKx<`e)WB_UCg+G03S;h@r5LYRAKC2*VJH9_Ck4e=x?Bw{ zv=D6I37JuVnlcq+KkT71Z>Vk1?VVQz>_dS$^{kp5og@pr1f>a97C>xAeOKoX8FdVLNX&mA35$T@PO!cW15o{u6hB4uhYf&DGMzc>_x~52M{KojUn(;QvQ zQMmp&m?BywTQ_T!fqrsHe||-NdR`CV(p2b>c{!#)VhCbWirN4~UHu%$;d*&i!A~#R zrS!mNr(&gVT)7|_Esk&^40^!kJkxU?7t6?udvbx(a$5TKfc>%!am0fCBvwOMVH!|L z5$tLLbOOBh2AZCSFw=70KOp@Z)BP)iJ<0BIwQhn6lA4xIc28wcf65ET#Ia-3Jg?dd z<@^rEKrUIJ7TJpQtns3OWt^ z3nK>}=;$RKiw6OERf=)+Kw??#GAx6v7~#phH#YDybYi z=KYTUP2&(#stTsJ&9`(~VOW*eAFfbi5WCQRT8bA3`!y6R;byq2v1Ft9uR7VoH2h}W z=N8Fp%E$!7jGVI6Kk(xCnnt>t7nQ3AS;irK!7?E%dVhl#BjDy<{Ul~Z>^rx$0*2JXdORFVmZZ_YAffZNXn6-&jk@KgGI7d z*RtYcwqbm{KC75Roll3Rdoxb%K94NFo#FMx%h*B%ZU}!9R*n61AKl*8tv?HO#t

5Ni?!xg7s~jH**(`U9$|i4r(CUKM~Y)5e{DBQDxR&hU$|*O`jc(4;Y91i8nUb)?F4g5$K z@E}_g-SLo2&-bf!w*rn-$6~#9t7j*N<(uSex(yX>f6|zH>ZWm=jCHA8)e4U!+Fmua zw;%;*l*6yKK&qS*^kF9)!wp_c@M2pSo`wsv_saGHYu^r$vFipQ7TfM{35TGXAz{ z$)%5Hs&!<{96FoUo91;9jbCifH7!p>4vLG(dQpeDyt3bAVU_7?*{XWs!r=2`VZm-b zX%z(xh|z0Yb=4vz&}FL5V!s@QuG>MMq0|p|rXS;L{yGx6rYhA8-_W*p6#L$_Jew+9 zf7E1d>4IIu*x+nlJHqWocOBmg=Qr5jqX#ywSMINCDuj~Hwn<<&1%>0jq$nb|ir&0R ztggxc+2Ift8S@uqUzF-p=DMWbEVJ>3m<-Rexd0Us%TEgN;-_SfAE(%izhfT^YM2Lm zoJCJ=2VCbHaI3&`LYV2H*u5FjyUK4af3`6riy`?j-iH{MTv~JeZ5#>02F>e_8W~p@ zgi-*pS$HDnHZ4Ct{t1c>bv|j^iv=C+kN^;o#Xb2zCgaKSp^d{U{B{*7-Oai;>;s($ zoUN-Zs|z0Nk+BelVy%eBP%vh!B|&%~qTf@|Jo7#(Ks23tPAk!KMSI@cYz;%Ke}(n@ zvV>wInN>!y#RUQbTfYUe;st#4_n^GVGfwzGh{>RJ_szo0MYYsGx~w=_#9m#n-D2So zY&TaZ2@Bh>H}j;OB>kHDbTc>0YKGc8& zHAkqQ(8GLGDy4m?rc7v4XiiymfBt~TJ+xU#D;nlU00CB-lTRaf#I4@fzY9OJRGigW zKs2Q$RCun{ESE59=4Rn=9?EV}+1@b+nVqtzV}9!GrY4nfaD7@Ho_ZU_j2HrZgXSmi zgjIf>Yag3;fw&0QfkZo=vD;oT#Ak9lWAXj)OCq@CoVa@WBV$fcD)fg&e>j~Z?z>rS zJD#qAc;sDm{I+@DWdur9ftv_Zz+w%`{KHJ)2gY9%7kNVa{n7I+$V(<{_WCj%MF~yqB6w=MJp1z7L|vWNYS!dh>iPl5be)BzVZw5Q;Bvq89dV* z+G(MLAngf#eMH@@D3mK?@S@JGMp2C*QL31`xY&vS{$%`5!? zk9SvK@1r>&POUjh!`%v(UqDJliB_!FA{cW-4&rXk0FN(41;|v_IK4vQYJAgjKvgGO zN_~dKswRq$czjPhXskSd=FG;XCLXR^`l_7kKe9ny1Sw~U=fYvxes?UVyO`7Jn4t}z z)iVSAUgPUde_gtPwRBrbh<{6J8Z7Sxnt%!_@asc}-Y6;B-%u@dE)Hxa9oJq5e$^M_ zpcA|G0ko*$fqlTKAlUWJhrg7;Gyp3N*t#^Wb-ik*>!3q}z#>^PzhJ(Rv^$tMWTf9} zsPA-S*2G7qW;2H0!yK#zh!b-1SMNi?VlKR=#ae@Ie~o4Mla9!|=|V+;6I%pzEbWeG z3~n(;4Cgr6m!0p?`J`92ZZC(%=!d*KB?zy{OnszH5X{~*|EX*JP@^pD!BAdXlR7%M zvW~nmOxL~{QkdYI<4gsddKylLj^AybEb8jtk=PayoroO=wDs*c;4}Cvxnj}1MZa$Q z>ElaDe=o_&yTLXi2-q)bs7z$$ zVD{r+j`2u4gsX1LaeA3$v^Eq2UG-KMFDt@fW$U^1plo@K^?6aWwirTd>fq&i9X2gee+W3E zjCkrT3}z#%w49SSB?tN%RG`8Mi5?fpWuu zxtqlQ)lp*O;Lnquuo|zmP?(&#>)a3y>g&1Ko@7OJ4LP?&E@7S=Er^QL`$=A`u3ZWn zesjLW0ufo{nU}V5EI}yK@OfZ+f00?qU-RXwC-v+4vJMdXO_-0e?thM;iE&ZmYN&`- z83|=9@GUEu5u`m>mVLM0`BkfR#qpl@OUB=d$jybJ(21E6ZeXh9?-gfYq3MPDa98E1 zTtRjaLvsQdoQ6r6qWJHt1;8L^4RZtIvwJY`xA$~j+>T$*on!!*-_eoKf3;wMq&}#h z>4QcI?_R>Z~cH`nf)wLvun42-JX8z}zJA2n60(7x^b5`YLI0~l5zoB=*53L*{9an&n z?K4)M)5kA4fU5WvMV3rFn}~w$EEOm5gw#&=uNhq#;od-9Gzg)of9L?z84G|tu6jdB zo#CJ;C+W#j4Hs^x@VxK1^&Ymn1cqDkW_%#)X_prK2-Af#hjj6(&wV)UArxqFAv~$_ z`iVGXb*=Vu1BUv9=%TXgA2}R$(9%^MlS6>hdP155HdtJeuWd^oXUA zhP!LL$P|aNDqIPte?@uo@CDN7B#2tIITJyE`zf6tTaC-Ze&L|KORW!RWiZdVG?1c3 zDE+i#X!%t;yOb$>n8?CHJg0Ha@_d`Z%Y)(i))&r_&+9lh4DILM=2gGFf`bf5`kq%X zTR1uUndxg6$>`5ZH8y=yUU|zNgGiHl98J`^lgpQPtmKBtf4xrzj2|8>NoKY^s`bzW zkQdQz6alm*)(QkKagI}qS%gK)jNT1cu;h>x^vE&w&i z89xap4GFW;h)5ofL2&8Fu9>)MY+UNOKjV6u@ zWdWQ7V+amC1-onMMhU9PW2P#y3ykR;Pi)w<6T6yA@BCc$2PaNy3g40};z=4AMm6UY z6?-q|DuxbF2X$}mj>yHRm+zMY9LwqlkvA>}4rJsPe`7Jl@)H*_-eU^S6G80(qNX#I zs-Q(*7)x+96rYRr1uIwtc@zkHCM#8=o}E+j;kF-qSJ7)S^y>!^TcJBz9lq?Ai_Bt} zB_u=VV_v0~$MKBc^qMEYH%pryDvZnNDY4usKMJqu^%u_?i3hl);^r2dL5qQKJGP|{ z54VrgvRZTK*YAvB`g zE8`iNe_S(foNKjxDMqTJ%WfbgB8%+1^@?uoD#IAY*C`PZ#Q_%w_4)pU5_fJ?B5Id+ z!zW7+cnlxx^jjHNUO5U#ljWSR$TOD0%L5@Xf9j`Pg%>+Uy%knAo+Ji$#tc$SbpIhL zalS}wN-;wiG!oumU)9dR!K|os)?&4UIml@^m@nuQujU~68g#tM^~JI)mE$_<%sYD; zDxKP$fNk5CgI<|6;AZ{M<%xVr9@u+noiV>?N%4SCbd_yk-C>q_o)2hjZ6RGajU{hw ze~4$MGF0|+()q!(`F4l^xIqKR^NINu>GVn7E}2nk=EaY^cR9yhBjbqf4_0hc`B4@hO$ZM&)DqG zP&wr#cHrd~L3>G?@&z~ zHvf3=`Y166w|Mps}bsqeV!|SC^7Ql<==jdJl$=3lqg#3@4Kb@hCR#4-nal z^})6uN0=~52p5M%0ESr8ga0XKZhIx5_6GsqD^ouqLp=yDEB@&a`2lF1`=xL)e`WJ= z!~2d!+7)q{HK7!(2^!_@)MHSwhp7=mXtTQSLg^zwYB;_ctis)kezjzH5aAy&eX%Of z%dKBLp8>}_D&+*$h1TD4y^TU3uyCvc*?N^+p_xZfkF) z<&roRCtz5WB@HDw%Q12{gP-1_f1V6-nlCQ?b}0GQ22h)kTHhT_G7Iyx$@ex>=;+<7 
zh3aLvLb&lAbcKDQjhNu2P=g0oz#S5G9{~^9#8naCC3SV+2j`iXd6ao!*6cSz9j-%B z4co%OWk}z+VWV(u>jQy%rBaiZo84x3Z<@uWOp=ZW>drN zJ}I^9*7e|5(eH}yNxaLmMp1joM97ZkVMd1Fy;%?Jp$aD8id?GA8_myG!?I?7KMTn` z_c8?(r~q1LqK$!*IHF6de@J3bltSHWD>_a+IE3hM?Gi+L17Q-%*m}dBErj25foA8= z4wKT|#8apN$_pdI#Ee{76y9?C$26*G0;A-Dh)Gux8DDpe|eL;n}UZ3r}-x`%D z)2EsS#v+*#SyLV^QdeEusgX)~iVc)$xl6U0VQhwB9E@3OMr8gkf4IL!VmRaPDc6|( z@2%XWkuLB8`!|5Q4+PDQ5!w(kIsWT5Gn-**%eiEpF<(YGSv+7iip_IY%cbZJm|2Jw z8COKVMqgf-C2=mx`jr|ll&4txu%B+niXA`>_OPhna=sTjv*;KB7eO2esg{F!Vx$M* z%^=S~3UFQ>k7hkxf5m67eslTxQ_7zt%#q(|uDNmMwW!RTxV@p;`r^M6sg~u;l?z97 zN7{6$L(etPUxXoquAWG0%#{frbW>Owu~oDY@|y|W$B}QkOas9`vR|skD-#;0EljJ1 z@2^V6tqweeH4V!4`+`^fx>V0`mxv!Vf?fwvH==py~@Zqex)~feh0A3ZZ~C)LB7S z`!Zi|ZaC@iwJAtnsLkH_9B1j0W-t?mO^d@>|Gup0mHg zOVDdd;9`Xle~yNK^e3Y%2FegkeNbcx6kTgHncd!Hp7ck)Nd#wAOj9Tbn-z3{vIQIr z58B$skQ5ij{D}P?U;FH>;vbn$D2u;ms!LQ}OxvE&(Ya0H&FWsQ-q5xN3=L~*|M0!@ zQTye0^n~fHxH9!U?SY~Vz-|jGHEcYz&O=b;cAu(rf86c8q-TCQn1As49hvIwclnX1 ziKE6#=?2w;L|v*fVyJy>7JcC(z12c3&M9;sWg`>pv)Fni`Qf5BqNfOt&o_`c&xAX-(h zrfBDPe`SfBRWP5ZMvClqRXBRFuZ_K2Lhy@+UReiCQBG)91C^Z^f73 zZubWiuh|9ty`=MNfij}2Ua@hPy`V`*x*-4f>o@f?92ACd z5#t_RyM#?zsR0*7H-2ZDqmO3SbwKh(05~g>9W5GOv|Ah1)5{_J9)kSQ(AA@4z+u@q zf3+_06!-?i73-SSre9Ti0_L^Y6!INsNlW2es;04*zoIgClfb|;0E=6K!JUHv;YDFc zN2sy?Fpr!Xik9WDpal7wjO)A9hUPh-Ez@3`dXTuHd7{*`zb&ieNq4ZTK9pyE5+SrR z1KcYK7i=JNd@@c1;iZcMRC|;vSHRJkf4Ckx=q!};a%`JpV|X7l;6T4J2quG#uNyZd zR+%q=6(FKq1eqkdWm05RtvN}n%n`IY!LKm(Rx8R*Ch3ZVyH)FdxxZ}7R)*NKu6Xp+ zR@+kzwwR-oG%yKvCudy!f%fJGjSKL)*B}@MY4&8P$s0#Zf2T?R zG#tEXZ7DDYCjtJn^n8<|O+{jdSYp@sSomA#?Hdr1>0@HOQyzs8 zq^6G1dcQBZ*RZ?n&osVGo|!F?f31V7xo(FSQ5~CSoo65xlzGFYTcn45rRSJeXLxG1 z%bLZI@w0r4Ep&&GFk|Rb`Z)BnY>yGiG&1=Awe3L4*BOx#`D~a4K@emFMyEjI_hk!B zMsyj;9*zjNqeLpi)Y3ivVs2355wt*2FU3q2e~Fh?-&lQR zbT<+8MpP0|U0kp6ya~!2C<@`-?eA-c`JM!-5%g`)waQY=NF5?>w_eVAFYRS(_U&TX z)tk7#e`zAtPD<#G9zN$U=Jum5)0cxY;dksTf-r!LxozIc#?7Xr)T4De0l!bhlOYVQ z9HAc)Wvcw4AIr)iYRX;*e_gARhp{OpC4Ib20q)~r)`#eG*AA(kA_vY5Ur`6<-Lwa8 z{iM*7LPjtRvKx-opTwz-%i%5u3e^|7UFcfqOBqs55A$eehB*E z6+9OFSa52075Y3jgdX}GACe?YTj&O0OHWv z$ky|>*3Wu8>0+nbv2G2wuGCsa^zY-;*h8R(Oug)wSp;dj%q`(VNW&i4QZ_LbqK6tc zb4+&OtF(@aR1aaLe@=ZKS#3C+g)y8m*7f3}zOv#1E}hSqtfVva%8Ly<^6(P3sz>3F zT2b63)#y6t&Ci<*WD3628HRh_`&s|3FY6P+=$3g5R;=3)4URk3zeyp!P;L*a;HVNZ z4ipBc_E@-Rs;+$fD3+E2eks6X=5Wp}!7&Rwj@NevzYUg~e{Ej8PD*4X8fXpqFyJl) z)6@E(H|KZN5=@XB)kC(xL%fg{#uaf1-I^1{C($n@mze zH|C*zKT0>de~`BvXjk*! 
znVdJDjfj)Z2$jcpltTjJmWd(yh_`M#KS>d($&c^ZU+fKnU62!$c#N4IE ztu1GFQ0vJs@lrWJgeq95Juk8f5enP#YtwqF?Bdi}sTq$EuXGge)vL+Tp|QoMSzIRZw%q*1ObP;ye#vDrk-R|*btEsuf=>s}&+Gd8VMbN@86 zSst+re-q@S`$t`b}3g5Br+)sR6uLc&bVSOhTyL9#;vvlG_Mpb2BwvdyWsnLJgm^T1C z-<0?`DaM#;Qc<@A%?#O?CXJ+gS<>UdLGC=Te{su;V))_5P+)X7) z;8D5vW&GXMugt9DDmrsW6A75ic~cw7D-YWGwK*&;k|v-T)5|~Dc&2VcWx`(du};_> zuqV$!yTW7=)BNzxwYP1{QQd20$n|_y2?rPSQFj{2@aMvM!!;V#pIvD7akGfXN-Tmh ze`)1310GcSmP3S^g8!s%j&6WLvSz96q?8D)Gi)W z6>y<-yjFL8W}E8*R!9}#NxtxtHT`=0f3d#C#Fon9SFu^c5ZlI-VyD`D^uK9L~AFSoeI6%Uz;4K8DR2p_eD zM(RF6r|7u2>nd8ZiaCbQ09FWoi5>rQ>OC;{a%u@ zco5|*`b7rkxaDb2s58Z{^22#(Fo;R+yWcMKn42cCt@U$j#(VEVwcCQBVvS`%2=+Pe zxQLr03A{G_^&!6-UdQxJHkr$-B&D*PVcB}zME7Wdt6u&jVX4`nr^j&ne<2{#Z;lRK zu3wmqqmrz@(8$)4bg{QdX_#n{^?eBxzBiOjc|sacM|zmApqN4nSHn~OuvdzHKa^>J zu5_YB6{=Z26Nf#i zQam2ok($C-HB?k-GSBiNe@6HAwPH#5Nu z*sG*doNg45Ju3BT)CeM6era)gf1FZ2qM4&Yksnw?MFM*&@YBNViOe4DWKCWK1NkKV zTpv0hAKSFRk|4ChXd6Z0xdSI^7va@TV>r}6;5@wYBa+c4bB>;0e*z`5K)gF18D>l; z_HDC?c&-*Y-xoMZ^c6|97JvS{n(0$r==o*+wVWy%Mhdz9JFjgIWPUs>Lr>cB4_>C7 zfNZHqlOI&HZ79}K6mmiAGefeRO|TgpYcO=O+)TD{0J3^X7#~AhrNKTd4Mf=40WBRw zKp#;MK4;|-{4zCte+<$Q%H5C+dY%{T2@_}Czx|a@!teUl*E#tq8lBT!0_>)`{)NmE zYkqh|L|Tin3z0KAeHtO62U97#mPHcs#MAue;Lyh}M9os5T|dP!;@WU|+6yu;0oE7| z(hQCPj8|TaXjQGrEfJC=`?k6p1%M`Mqd-dYQk<^nZ%Bi$e-&ZsoXQ41{@}SsIIx+Qb<+QoxZcqC!U%DBn zSAOim+B)CAwJn1m$o2O3W28o!^%O$Z8sRgEwr?E-P=-1#;JUS!2R{%vsQ6)WrU@|X zu>a*>Z7lJ1!ZmFyC3Rv7!rtVp9(%^iF&r+)vBx|Ve{bx6eu>LHF~U10jW5Z4t^E=y zx#J>bccXQlXpQ}+yXO%D*~aQ!hG2ueZ%;{in%&4Q(HMg<8Iv=+%oxdZvO0}-zP;Ui z9o(Z|cyL0;6}=srw;7AM!aYyUyjUmb4}wGCSTg8gE?Ww9Dm zDMKFze+ZUzI_`N$irNVRX7^h#y6PO{;B2y)Z&&c24{WRb!@Xo*ePqYrwVtd{ws9OC zN}p~pHIk06jS@b3=md*$6KAJxuFghVTvz6k2w$Z)q& z;w)3q|%Kd0AjM zj&qe*P?TT%+{#?{?bx&}7n&veQ+~?O`+Yrhk^R1$t7WpL{9>Bo+O}_{pQ@^%p113~ zD9f6z?a)ngnyT(E8J2m8VZ=P!zD00?e^5iKs^d6On#O6Gp_7Gilw36p(>RH7TpXHS z&5!u2mvEoAV3UM{fIExr;`HWIWXFLmGX=#2C*6+lyA|Oa=fbGe4PI_0B_y#V(U_(( znQlTmS%h>$r*B2UpciSHuJf#tWer0=df$i8PWNf-tIv{}KjSi0lL*r|O|k@Ef0}K& z`Y@DQ=Sh+{U8W%znq?@WFqF=M_OdwrS;lcR>j)tEp&RB2yhsCp2r`x+^Wsl0?Pgvy zjq^P4CkxYLy1NfyB=+9uBwHmLR5xAZwO6WwL!EhZ7p(c-2p z4WX*&0j)v~hGQp0CaZILm9U`C_m?n|JjtGd@KOcRMf;^H$J(ZQ!jPBEqoXNHksQ5h zVX^j(KZyZK3L(3mPHtUGx=&B63uI2j(jxG7G?Gw|sCdr$!_07Q8ju_Yf6~AJ-`9*+ z>;e?EC`4fFr(-2vjX&>lLP$Tg#QN26exWorSL3nk{Y;t-KrCuML}YK^MzpQy zz#nm5ffw&p;eTyGMUzh_f1UMKVV^C~8x9Di5Fw5l91kf8p^#1PcV+58@K^>aB_>r{ zxBsE-+nSU2z4g9Ir&ryR&R~d>kaqUP(#e)>8Dt~C+z}IGW57vFBEzTe$c7~E`}^;< zdpfjo!bN8})e`_vfxvxe;8oQ5J zcz*r4dNPA_$xl<4Bn#~xoh1qG?$<15=*3A+>EZ3;;}RTC(fE$Bw~<^%`-fV-lKE`v z1f#`U{8*|>vwLya_~YrLtiBrWGncE&>LEf~QrF>PG(9wzyYQvDJ9}n+uZl+r*VNcjLG85V>aR zEHBH{fAdtsqv48$`%2KT-aq?-XTc16#7oVgc|@bt32olDf5G^v9h`@stCtRfmw*O? 
zF%a|b|Nd|PWm|t7)~EmWUroK;{?~u|@1Gz0|F&75*8gK$A5VkRY5PB4*3Ijq``@n& zz@!864bror_!8A(knSpgD0J+DnHQh3B^>VWHr5g^9i28zcAKNraBxkA{{yA70e}HIJ+xI5krpWKF*9as@x*v<h&X_Tv;FZ5d=4N08357J3r+J2O~rYCkY}O!eDK+@57j^CcLe(Ak_#8nu4bFw5_yfoLQJq z%uP6Fe{w$?jt;JZ_&qlOu8AOI9d_Oy;AiqSUkq z0ZnLI?RZjGtc9E*XL^=$Mku9_(3zKt!Yk#He!RJ)*r({z@- z`Esm~Zt+5zh`05MpEDiJoKcuKQ+q#?>G%#zg-;sIfH{Dp`FcL%es@0?c`q*Ze(W0O z3JT@_{NAnk99_h1t#`=e>ib^K!bdv4-D4Wh@?Cd-*L9&gbQ zf60AD#3jDy$@pBqA&`RVAb}U>J(SYJI*S!}XJTq^xgRUGOwLHx)Bd?xi1bkY>C1)$OJVaw3`b=gX^Fphs*H6Bj5H#p@K-bOD-R{=6#cOxkA!nxHwss`5=3|mjM%!%5 z+H4ClpxF42V4BqTV3>^RIWEVae{@%WO2ip$^Ac1$LdJOtaXuhGF)V9Ra2cfw#5&rR z>;`8L96nu$4GMtO@S-7>@OE6X)kN;9J<;+U8v2+2$nLLpykt9mAMf%zG!tj0b3o>{ z7x{=@vJvlo&+WK}qg~r>y^(#LxCpzKGdcV9ew|<7XMPL{ao}fe_jb;Zf8FQoA};oS zDFn2-EDlC_jPab&bG|~dO4oZHBmF$_ zH08vsc%*X#CHJGUK1@K=NIipc9D$wZZ06N#H1xJ$@?B8m6f+Tm6oE6-jU+DjhmaFf zCw`*QVl>ayMZSUAe@CKqcX3t4Sq2jC*vRVL=-DN3c3A*4_T<}mqALU?^@5$MFjP;e z5zkoeVM%Ed=#zvcN7M8nO3XNzUviA4n9$k%9VX9lq!so@&6i*B#Gfg!pJETo28Ixh z09AlK#1w#@!`AxyZVer7e*BiO>uzmK=iWyDj%%{CagDUif4`1vh5I$VNE_EWlU{y} zJx~sQ(vShHFGN!$%7!s4c0rjRXup4>c$Z&eR_;-hFF-jdk@@_DoX@q)Q?R~Xf4&^d zSBqtqz5#V#4~tC(*%VAh5PVjH6|S%W1Y|ovnjWkl@eFv486%k4c$uGtjfZWPT;sim z&Rx8-F;iNfe;yMs>M!KZ^z5mtkyc*-6gG_oJ&3L24+ z;!Q{eNZsAHv;wUcVek9S-gk_Yz3uap~g1GO#h=!WYcscd*MxSKs5a+dlL@ zYspO3o%`UDPeAA13dVEX#!ewc$g~?FpCY2EBGOeLf8E8jbqO}^0@uYV(#?YChUSv` z$j}+l7F}qAWr>!^(II-13A3nT(1YA)Q6604~dFiKRQEka@5d zD0P~MCLs0&d@@5xr@$e*!H14gsB&Nuyr!j|-G_K5@A*t!eJm|lA$^`ukC%DEXLx~V! z>pl9(KfC?5=ld9RF&V+b;sFa%O`I zY#;YXW+4WHmJn4~Q-REKoX9%FMu+g^*u4KZxU10x!ZjcX!shX*To6h7JpLA6Z+Y-f zf9%}{WfvEH2G`iw`|JC7mv!+m*r`Ks05?s33iY{K*f0)x4O}fc~V>7O*)mA6e*-G;iitkyxa}9Pl}w~?s|lE4>`5h zb8oN5*T4NjcUFJaSm!?LukRZB@;RTWzCP)5m1|CtdyI&4_qv0db8jf-M;b#ZfAV>n zz{ELku$CTzy7s4!F=#e>NuEhEa~}iX6pUK=XNo>jxlX;!CT;n2vJ>xVGJc)_Wst~d zA_X}aF=HU`a#joq^@mVd!*jk$N z&I%?ES&B=w_OVK+WsH_R-4ExA!qEgJg9X}wb2^c{cY6{55rFYrKsLhJ0 ztdq&mQJ^V;f^8<3WZ_}qMy+!BS-!9{XHYsruB2L?!2?NgAHS40zYG!6f9?zDiXxyk zuUL54*3n?)hiGWRJzxQWvG%7b27v!OueZHdMLfIKdTpMFR_(5J!&0>BNTpap?oRT1 zJ;BNN18YrT`}Dkjs4x^FMf)}fsklgNZ?0p{E<(UL?BqH6+}0quH`vKfajqsv<|sM| zH2ZqISVRU$WcL}tTMdcze`gm{_I{7sSY>M^yRWx=`a93=^61_lIO39B*mZx!h0mGn z^712_U2_k+m%WeaU(>{BTbGAlgLlZawc`^|ZgB=TI*XO9%OTB<_FS?gzts_oE}mcWDRiqr zuJxn1t(!gHDDQ2$%Vo2hAAN?MJit;dLYjR=hVoElpZuJSF>po*|A-UFsCmxks-^L6 z{FtHWe^6k0@ywWG2v$ssvCJw;?Aq-0g`jcDEcbw&2!TjX*}SPs=P=!6 zE59(S=v7zj!cL2nzlLQtpX_o;N%d&`Sv(lL!>ht?7BaH_3?%B~MSdR>ap}(FPFUa7 z0Dpvuh zr-`N++P&P}vFssKBB80^sc{P|wV1Y&gBt5(SAF=YbDvzaM~jTh9a!5(f#&1IoLv^B zyf9OUSj$OsZpIjB#m?~INwp6&oucyVZnQhaylB>bf7%q7(16K|p|X1NG~jC-;PBR1 z1knR1hf~7ee9h0{C0E^A^)>goUn4dA+a_#I)cJuMHrL@&(#D3_cio=ItDkXk?`PI0 zmQYHxT=GKr0@EW(_n{(s<0a!mOf{v2;wp7Nra0ckyXQDg^36)tX;3tCA{7&9wh3Du zVNk~mf2kY{MOe}hv^h_B?PHp-fJ$dl)rS~zY<P{~wBB8QCMLZ#6k0Bskw~Iux!L!CE&2vDxty?3|^c)%+A9xj< z^UjCBEr0*9H67%P>2QSQ)7_}PlgMa1m9CFvj?~xF_6>N3THz7*jV4ls*eFEx*u#*a zz}-&!XB3QiDEedl97_yp@CCnW3cTOk<+CGZs4E%CFV z%rxsdibxJ1Jq?Y`R}t9rF`0=KmiBz`^5)ybuEW57^3`izclCdxs~z|GTW7OZdq!e$ zeSb}7^Y+Xa*xgQXeu9Eq_^T$D%_{?i5s1jvWFW_i6;6uF#!Rr9fst?1;x|XYv7c-> zZ)*l>yzzJAcR4g!P#6K4GAPYh;_96vu?@gE1<1@m(kM)}cMHk4-dRp@sy$Nl@qIEK z4B?bvBheZ>*W&K`C5t=U9TGW#J~y%c+JC#|Dc`ZITa#b8E^B}1LiwLK;y1RHU)b;5 z;JPkXY`=S6kY4lM-|uiyNB_L7US|xlWo;q)diDR+9}M$JFHO;()^C*=+oD zy!NCM0VzLJ2Uyg$HXpqab>R*e*TszqEqT`Wz(p}M z284lecV2gvSF`b^$8GsVUF_b_!hhyd;$qj6_VLB$%e!CoppC^xZt|b_(yE!oi+lcL zI-m!(B9Kp0B+=T-5ws96kjNlS5Hyajt7*>Z6zieqq1_uplAtnQpq1XRLY~EFegGpr z;hYngDI^T#ATu<`S3c4*w6!Cg-4v?ApIjObNg31BMT_AECJ*Z+^th|77k_YR!Vkc< zy~*-$@AoOCPT0CP-1QLjxayUg_^zvd;_6S`JBkf;f-*08nU$#Qe7naID>3NK*(ffA 
zef-GV?)?2==Cugvxhl?i6~`riWTs7}e)Cq+L7<0aBJUT&lD<(>qb*VIkW|M2=WEao zfnlt$78c(+dpq;BT1d+`cz??seWzt}frD#vzE6-mU^a(s&1Ied!PFh2f-f;SDXck) zG^JSkG;$;dNf9#1^VZf~l4DXW?;bCEiH#SOrmJ^7Qq|DoDT5DqtFe(Vu=y*4tiw}S ztGsVtR(@l3H*T%B$sTJyUvVtI=X3tb^=yvQ+x|VgNe?aBxu43tcYnkyswtwYH;YtZ z0wTjzH6n@of`EEptY6_C$o*u3#CAPu2eFl3k#yI5#KEA&( zYiHUS5}fbt`wTv{hx93ZMZ!<2yW^|Mo)T~Gdx_=$z{WYtwBr`S4?wjgTlgt$I^TQE zcaPR~I_|6c>i8|2qkrz=nXo<{@sc07&Pv|J04lF%<96SN;Jn7T5N|v`e1vFK7ShF_ zA15zxADq+vTwuwUJaG%Ac??D%&^4bsEDEUGOt8T(x~7o22&9_79b6R^$yc3lZ2yL! zwj+TN$ea#7cXtn31faR{m3Lk(orB-y5f&E&yo0a?YU655kbms(Z|iG!aZ2Z`wRMNO z=Hc0`9+@+HpWWUo%YhI3JPH|$ecKI1eVV%-OEC|k20YXEF}to9^q?r&(ms#b?||;M3GLM z4M+-Y@R~@=(|^$rj&BdPt`E~%4L-FB%ZelEc}0Kj5js1c!OW~ICk2D2r_K*a8FqLk zvZ{fgwC%;x;y0I^_4rNiM|h%^tW~=>by>GQH|!}|-q?M)2fmn%Fgm)D_6v94uE)rLpLa|DW(HpGW!-hb0fiPDUWb z=g)bH6udV*PIT6PoF~y{=h&VPo3oqFUV2-ciGRg652*9R`kg*T-{N?7Pdj_)%n?6k(!#i!t|Su1N+3wP$B;|6iEWh;H2kpREu2xN{BlS^E36BK(d& zS37wx_jz#xh$bQ4zTNd zZhr~)-I?qCP(p9KbE{B_$lV=+h?V&Z0fj*;;hpmZZo%5hKmOibixC_QT|$x(4EB2a zFAUSW$FHY1FxuKgXAjNdQnIWjklo}x&*cu-_~ zI^-D7+y=>d8^BMTaBV#FsI9-;KjOsC+1&kLe4VDq>O%T``^#r^IaVRlz8>gaTYmp3?16U=EcH)`T(=M#*6`eh&1w|;(~>w@BA@HyETNifR%&Ubce zC@Zu!c<_-2#PUd8e5NESY7Z62)qh4#JZVnLp-Zv((;hr)4^?LE;j8nvvgbj*^NsFo z*)!8~OVGmd4_=96qYht`8Ah$e!v9#KR0lF8sM+JzNNjO-DZHf0*OQBmzzD^FXYh6Z z^^bei6d}30dz?PiU?f2@OJ-HYyhLIor*{J*Sa0Xnzga0^d~f|*+VPw2pMU<%`u~>C zul#mz1J>8Ibn(yq&66qcVPG{1B|k!|Jci_){-Zw0?qgSWK5l)eDB7^ataT?yCU5JK zzA>3!eHd{mTz%MUMG*%2s3{_}O0XOr*5Gie+BN86L6_og`p6`1;Npj|se==JO;bRO&PvWid`zK!Dn~Ikj9| z4ys!NdlnILJt(F1f#~jj_zY`VL9fA}eDIVFPTJxRR-3~IUzfW7kqvf@m z?i?FiZw5t^fWv3E^oq^ zRy<&0qCuEubugPVtmj8D8!u2L9jVnE=8NSJWKu8bDy%o}0fIX$8K-HSZV*Ra!Bg*W zihOdMsj@<(LfPA;%6|{?rw+*Ne&NiL%ltHxZ~k9(zzvJ&aqqqk_}Bg_M?u-;h#oJ+ zEsyTw#Y`TwwmGr)WMC6-jDtb9dfKk`OD*{1{5vk!Xo2ecDXN8+a745qiQ{KX^=gtA(yL!Z(i7J??*VnmvDI>s#mh4E*f%tyaA8J(u7jhrurI zwM_lF=wbt8X=pT^$oB8^VC&oLeWINhCC(|>aRRbp$k1VEvRRl^Inm=VtoblV=rTwe zkD$VfXR9?bLw^((G?j-YLqN$qQ7>Mh3a@P4XmhhwdQV>}AVk zhCHp$!I{S>3H(gf*h}iCDU2~;^HthVvu*K!EdLVa)}N{_c`S!ZCeEnC9e?64`y0>D9e<|L|Lt2ZcD1SBd~*4nds{8o ztw!n3?fECSgfu&J=eFOM`E_4M@ht7@*!A5bH6o1CcuO>!Pqfweae}E%Adv{QDnZ~d zA{zxJj|u3Vm%w_Tk5V*WWDQXh@?V~BN zI0(I<8Gn+EJ4Y55r3j3urB22O%MxiGF{~qGR2YyZLVER%@6Fsh=Nz<0wCU>TTkV7H zcjjPAC&W`qk|qgDt-pD_t+e$}V&ij~b~=-L_Q$#G>fqlzrrm=+KHI#{{x0)gbJOFV zH*M?ko3FoNEIOY+p}L=Y95r5{W4sMo=PODl3xAlsiD~K9Asx4*6>}sT3g(KF6x{Nb z$sDUgOhxINHUDNjvtVmzd*3&l;kquOwCg|QX|m$ zil*6of|?~Z0%1g}xNu{(eSBond5ue%V4wuI`AcMMe+Tf6FMVmS3*19G(x5XBp``E= z6(K23D39qd)99|2|0-**_N=C^bE`+r_v5Kd^vZ$kVkt6uC}ILxEKtV5 z#AODY(S#FtGl*6018AwJl<2I5hmCunvU;B{vne*tCxthWUY-%tSr`?Dlf&BslXaXt zyiTBi2W)Y1duH2=BfY)5d_5`9$_MSgfg;WctZpkqaMU)hrIF@1(wZPW`+s=Kr!iEO zcdJ?GmwSX4u0YHDRv$CV)tzkpg0_Fw^S^aE*)RsXRt^4h-!s40C1=C zGd^{%!JYnj()qq_&AS=_5j}J=Z8)7*8U=S}X!(c~G9aVpG1CpBX^l6AqcvnPX~!JP z+w*w|&I(_o;6Bc`SU!Kfwtsnq3i_U6wFkpP(q>aDUFY3ib=-ybY&HBzMo4cj5t5Uq zTGEIl^oA|$V6;DijPC4Md8GUG#3m>Cu6h8&C+sYiD-Z}7cLAkq{b$tG-#YxTZCvO> zsDDv6Sgfo&gQ4MnuATKQmaCX`dD9>E*5_*ZkAA;Z7!??{1w?$-aDNzWYA~*0i43TZ z?3$$l;`zL4Cs4WpmZvgFrB8<91?JB=XfYw@agVLYhONE#(}xz5t8baY8=h(|5SVI% zu#2H|X(T;;I23OPu2_q-IzX%VvU#^_@Am~v7cZ=a{mL-leG zd9C5Enqrs81`X}`{(pKDP?(2>+@k~>51lolQ@;fx@Yl<`B&vD$T5P! 
z=+j~gc%~mbrwTAccXt*$lAICgQb-+(MDCL@D) ziASvn(V!*BR3X-qeD(2nVfp)`i9P{3y@16k?0ODGOYM0`SbeZ+W-pwYEQCpBuRjN# zLMCkAn6lSTf%ZBkTbJ56bnQ8?+7(ph$M~|oo0t+KoyUk|!`L`4R%bw@69y1GPh>F$ zopYkOiOO;0L4WQ+0T##YClpQx7`D^FgVi1Vtk>M~5U>?@L8|(Gwy(huUL7k97yunx z9y*k1-uHi%H~rL2H`@Dx@BC~g=I9zvrg=ep+pjI^YZJ zLQ(~wY0FVIp+)mLB{1P5R@#qLBHDZeIImG__Pnhqw2FqQsc0*{f;OM}CR^|Via_hB zpRCJ?ra_A2ikGh8R#&Y-4F(Eh9gYB3Fx6ch`@ycd4bfwAO8nI1w_3K-AKJbXV)%|G zaJ=R&4uAB#yWiCd-@JQYmoc3x(ALJqy(;)L8ICbo#`D=M_p# zjJ)@M#$=XypN`?N$dTz(~m$o7$|f!J^+IaOdXv@I+Ia=o+PV4+tuNT|kAEPs{Sm)4AK8x{>kq8ydZu-E@0|t}wdw+^ zu|N3CRKWCypU4PI7BjHJ1I)~u?bp<_&CJMfF?>$3qCVNlg2Bt=+3J*>`jD2OI5ZyC zGKi^Qc*q|EJkPhbUUh;KGXe<=550Z~*4DRgAVPM((1*zWj^Ee5xnUQ}XMMeB?SJyB zdF#)q?Kkasq0DDp47q9!dh9Quhha&ymiSzAP#fR&3aFifs8l?~`aS{M+f|pWH92ef0yL*#;;Pi)pLNJFw)%Lh-7~NmTsV4= zKC}GsLZWlu=hmtj0c7iQkHkF^67pBaYIG+tKT^)iVU|Q^_HG{iq5nsec8h=MIBlrZX zl&6pev&skUts*pDQzT!B?W61TNm3%oF`O$CrW@uXnR443WaCQ0wit;N?ZlJcx-HUU z+e^k#F&;)`-o)qn=%2IomSiqZ8kr#a%o8IA?|h0O3xYyLII(_QQjGH@l1}-}Hx#bz zqweFo?)rnS@qm8!c(cCe_a+*A?4w-1 zQVqP%Hw(Ze&pAgj?H2Y=f8B-7!)>uwsd z2{!n<)_X!6a$?tZPoP4~ZxXDRVgcp-^+qmu7Z%amxr zNMV*3PMbx`IfdpO1ErXV2;Lm*w@pP%RU_@@eBwrfT82pWwFA-NbgygM=V`g+!FfBr zKwRUa)zgwrho4c~YJX_+qkqg9$o%Vmd*69chh260d;dEY7C*7SM!z(M&X^rE?KE^` z$lXZE*Qgk0zRmwvDh)b9rm&&?K1iE? z0vecb1Z-^m=dss+cAVnnn{S-SAD(2#N8MxJctdv6t^MYWZhv{+cMUW#{1UZ1%$730F{H?b9Z69af&DZf^myM9FIMo(o>a++fxQr>hQ~@<^k3AK(XK@i? z?JOsx#{U;*8h=?m69Dz$BZIK(&oj8MFMjfd>l*w%yVh<;mW%88?3*S*=4d4_n5$i) zAh-8)ZL|%hIIFduOX%mSoriaeqmC=Rfb){xg1WJ&r=0p_z;W zl(=Yc`WVB8mtqL17kcHZ{cErw zM%wss|MyIVU)oCaHwPwms`FL8pClCaOGz_EST$g+^U9plDJ{`` zJWX7k5UXGPkN3@2xf81Vo$s={&`7(FYftA5=!eI7E2NyeOI#X%KF@#opnSzkbBYsP zrhmJ-_2tzEwL|pe=>5ct+1maOTW@X0cTnVZU1}g*f6Bh9_gdF!A4rAu$~v(>KC z1pb9z%13cxIX8abxBdOb{d2ULwef`2D^9avSZ1%t5pPISNwGp0*AWFBw?96X_?09C z_&7`)H#Av7%!=_D9Z#`w%Y#`npmA*UF@G35m4nakGd+QQgXVL~1q4K>6<*sokf2_d zWs{zTv3AWm9m*ehp&I?1+3Z$74})7DoBUAc#y?Yno6WFNGXD1e3=za zN{yYz(uDrSrTAV<72xAXMb0Pgg*)M z8keaeemAtJ_&{&i&6Rg8V&bMJZ_(cAT_Sq0kNbFrULu$X=+fawVx2sc9(! z>wiA6wx8c6RR1$$!qk3rk$=8EIBSgHmJiP?o;3jP>xz$0Iy}M9E63k!gpj_@7c=}A zY5orlQO8SJd&IrIR%!JSkSeo0I@KWoZMofr04b*g=?v+&<7akvq#_*$xpja)Z$kkWAtHce}?Ukb$>eh;yu69>p;r< zFJ3USTm|`u#@PJQAl^8Qe&&sx8FGKMyWfwamY_A1)w`caj?6GkAY(|=2+m`<(K4k* zB7q;~SgVwp6r&bin9d?KucsK?Lxc^&Ef2WK0x(s{APdvInIPjK3cCTo{QgyE2{rtx z@5?_1?_-XP$BR#WM1Mx3+3VXT5d(r9%xcie68zl^EANXE$cBXyL?z(=VbiS zk9WAp72o>4*M6U`@#VWd>UvfFg_f}Apz~i``?n6WdtLYVRp-ExWk7~Wh)p(0&Xh)H z_i)IU&mpqsaEfw!4i~@myx)Ahoj3J2XIUuQ^Z37?)pV_= z1+H@n4MmX0#DBQ20TM|{8$XiO6q(a}Sx}DC=Y3R~j9PA2nR`#GXo3WuGj|$XGDdMV zl!Y@VMNKUxhqQ6U`SbqMjQe}vjscdz87DC7xm&e^)A57tjDQ(E_Vtx@AXa%Zw)0G^ zK9gSgrBOGdrNiT{z1!b&Hgc!S{UGyMDvmb|ACUaYAAfd#f46>c4jG~eBb*B}E%XON z%D4UG6uhqwV|gczkmQfDr7KcsKeV;zTSG8pApx4Pq4{VGB^cGYLV!9h%yM>G1$OQV zEHVO8d8FM)+|7~kEnjxFd|B6@ah6@Zh%7#Q_1#^)dba#~m#aDPt+!xz(;@b_%?*qE z*00%^^?y^pESQbc+B>5n5HHffosL4bG?a~pbn|9~2c4hSb|6rNnaLE=0+XHIGsqao zx6c<-Atc{uX#0Kc@)+&J2)Xr~vnm2fw~@@&sC2djy^TtlYnIwMOPL^25NPdZk&k?> zR}+3-MW_yal9;ONTd{oOVC+NC@r_;Hf_naJVSi@`(LOg#4yg7))&Y^*2crE0yxQ48 zX4K7R+S)lf+`j&?^UK%x+3^K-9z&a@*BH9!@Z6ij^9Jd6YI8Z)=}uSMfwlXp(tOVFnXg@C`C1 zf4m3EbK94Vy>P3wJ#=7uFK8^qTnoOOEVwjF4j%pmE43xJTd8h1*Y|x zo-bvpw*FcF^LbQkqx1MkXr<6f6%iRH7>(ak8-q2){Bl}jT)%p4J@b%GeSfX7&?1y+ z{h1=MjLxd?%V*9YJ57;HC37U%)qC3Nj21439VfV=?@^^^{+WJE&)suBrzl-PCC(~s zB9Ji_bN$iz0pPGS{2f-(;@FYJEpeolRO%}pA%AAXoI74=bk$ehYO=%fOjjPJg_l&? 
z6wM5z#_TM1tj%_CkPChm3x9SN9!|6t4H4QQkvGnh-VZ_rp!Cy2ci_?d_kIL{@SO7j7NC0HGU^*>vLgAmO#c?@X8fq;`%&&#&1UHk-4QE*Rtmy?(c|5qU!G zy`#`@k3ePXo9kSDT$*`u%b~s!TVx?#s@qLv8JYoxd#MesCG% zq|+y4cD-jv-}FBDS%2)Zv-lOCEicy1#p}OYAL#t_B)@tKLlYm16RVpkHnQY)R-Tjj zp3X69jZvOEMc_}DoLgK;V3XrPG@cM0qwafiFF=~vqRJAxPTT4D1k0DfW5)-5&ud)T zx>o#}|He&jIfctlu05U3K`N5ENx-!sQV~APSGaT%;^eK>(0{Pfx}NV|e{m$5Tm0SXa?)&H31o`Elj%LGDl8IHW5iGLG7P#N-n0k>h&lveQme%^ z6;ChEfk?VOz<(Ob3=aw+@wwu#%J(=C3`z0j$wh_}+ha?ObZkebd$2uPv~(%FW(d)M z2(Y5A~Kx3p=$F)}+k-omip?F(U zM|Y0dE8f-%&HKKM0fQ684;oq^F3co^Cuff*7qLNI-v;eC^e%@pzveUjg+HTG9J~0{ zVO8>v9)Az?qmx-Mo0lLigL7V5UD|iQhv5b#LSxVgNIO3=+d#6K+IdLyVgJsOcBL*F zab$A5PzlJshhuSQVG3FbCoU!BYyaVK+s*afK2Z+NXFQ8HK7=vI{8bpmu+BVB)}N3h zY2J?Ou-LbCLa7+)Ii$`B&(8@~yzkeb`UT;-nt#o`xB0@H23wa5sxg{S6Ag5S+4$a! z?mPU`=3|@uyYD$jI^FjDpImN-VGTPh>9+UR_V!{((#ixS8IQk#8fN&s-DGK${{hX@(Mlk zOoKt@2&HE0mHWF%d7Uw?DM?5%V_JQ1v%R~YK5hCKZs&(B2hTFRx05*(pjWGe#2=f} zYW|jn(zWx$QvQ2>Seq@c+PyimKJ{DU^M5^qa>b^x8E2Jul}rXXd7B>V4!5*XXg@ z{cm1HSe+kqd6pAgl>Zy!i0kIl*IJ}Axn;EZ@Tk+wh7AHUyL*^BTOWgCaSdqgobXIc zQn~in2~hi{Cqs4eNb|gI^{eH@L*)3ZZ9J5KXR&NBRKnU>OwL)!A;+uH_U;iX1b@02 zWZI<>=1vMXEnu~^J?4H|H1mXG1m_@><}%HxRC06Tw))6VjX4=nDaR<)^JCd(#{=+2ftggpz zwW3hrUf1*MGXVNLmiF_XtS&46^?w-xSFQZaJ@`ABz&X<*;bibtPvB^#WH6$4ID3r| zB|!--)dD%ixE4}RIL-`?^EEO$!$NLDLOChnAfOu<2LAMJY3{` zfbRUAowsMPz`lpL_?35?yu(DhI;!W1f1g3?AD!>HMD|Qv@_Vox~h?&qKS`s+;hYY)D||4(|A_4t3IPvMZ* zIg!uZSn3z>Z9Y7&mx=D`UFdt4cYn-;JZHap4ZnLv`#Fk<^~3UTt(AWIfx6r$>w4F& zGdS#X%HS-1wC=r6xr3b=3PW%GZMS;u4^6AR-oRKWzKZxxtyja<6Mr=nkxFb?b|NS`T^BNn~%a62^7|N zd0q~32AohGJiRI5hAFrF6PZ*l)Ni7Vm->b11EZK@Gw!_$eE8*mIX|QpE{Q_%I(0-< zwa**YSH7(t^9Vdcj&S(Zb;I#7Gaf!~2g`c1<9n*JT-NK8et*{gP0x3|pKE!}Z*0zc z;4k+fRc?EaFFNd_G{Y~_dBO!UN#Y9E!xu2Q%O~*UH48O_IhnJ5H^b_Re%Pg^8I9sX zTCE?)X3pK+jAlO}GZAg%LL_?=Q1rVLjd>SP=WPO4`UQh5jxm#eCN@mI>cPW#TQ^^M4;X_Jra61MCFouh=F z_8w1lwD4Bf#FgFWuAVDx&HXaA{HovWk$Ful2UI;LBiiNus=xF|AJ1kVSc@T~Ub*1? 
z(9YA{JspgPYv(77i#NY1<}PH6IhBOD@mxU8Q=1?Ee}Aq2FSH<#b6P^n@n-7u`z$x6 zPbJ1xrtpK+;@~2Ka8*2T;GCI=#c_WNTUm~qtfVTNk~MGKvOVXbpGKKQajH?`iY4y=xD| z4WkF^L9v=@UBYlB9y#K7Zu*c%@3eEL?irZjODuf6Whb+WL8Z<@U7s(0||cm={(NlzhZi8s~SE-jkRD#xh)RVunGy zmtNg;xHvWD+McUJ;efGci6c=ivNm(qWDF+K-E{JP?Emxt<>Q~IohW!G)0C*-6@QH1 zEApQKoa5@dnmT%Db&e4(GVp=1)raLRA1z1ocfQ{JO&>XqS*s0O$tx?P{Dm!t|Cia^ z)X{1&x}wSciJx)jfSD37H11`JY8|EPN(&Yh{+hqKlh226ysyLYo=oP;j1aFZ;_9R9 z)xUBOG~a{a!^KAa#2MeE)BTLG>VJ^+CuPdMCY{~eQmn$IShYR`_E~?==uyE#s~^1h zHXP3NCSLZrEc3%}%(O$4lMB*!w4X{E_Z)<69|zXK0kFoC77SE z=Yd#fs^aT|D|sV)f*dC$6OWb!7>%5FV)@3<@_Q1DPmb~wH?*~s+x(3C%bIjK<694d zoBYaI1J<4;;q=_L%*ZR}Wt)Xe^#f3ga1^VoSYdr%%%j6I@teAoP$+IBi4y7fM}b~K z%7^{NGbZhP<>`NkXEmZmC3Br8;+Rovq$IKM$64R?KeI8YheLE2L_F(%Q=?o^8Sz*7 zD)I+O{m2Ee+^#d{YL$JiXpf^;66UWMPKa>c@~lXP{Ri}Ov6y7fQ3$~P;MJ{*vxnVp ztfS%>C4Kgn96Mc1++X>#sExgF?b=>;e&QV8)&^w^x}Se^zUt&U4uUX8-%U!ao4eC- zrN`)7L6jLYW5+FlB){Q+Jw$I!HD=A`DOLT`{`zxQ@fh>*q04mlIh(LCb0WaC?_hbO zVv4OqRG38PDcAE*QuI`F=R`>kzl>)&2mS8J*l1Din^@O$s!sdyWbP%pCK7c-U)3<- z3l^geb@_i3S>B3i>r)?wSf84->7RXJZtB!$?ocs9jr35<3ujNT3xmNs`8?iVH6NCD zJRz$y*WuI$|4Ur*>wRs8~*e3a{hn6O#kG>@Q51!nUu)76qF`)KH3d> zo~~H;xj?QbxyCo#rO|Rm554uM)E^Z2)@9{Eqv!ItuXU;EFmZmH^W}>_l1DFY^Fs(5 zfR79#I&F1P?YP@|K$ObaDSu*fFj3N{znvP-@5#=qw` zUxNS^}o zV#S!dP3RgysnSaUwLd@9AM11OWkuF;~s7G)?$yZqxU-M(W5v2c` z^c(@1b5P~}P0*}2KRU}GMm9h85e~kX&Rl=c!DNEE1f@bh74@vg@5Aytiby<>S$`F< zML9d@#@CMD>ihuiz&}Fl{OX&P;=b=6?~plhdEI#T*RLDzF0bQ2-VlBIsZV}yeR{K= z|1##6b*Dj%hdj9ZeB~wigoBX7xJG?W*m~VT=B)(zCRG(QgD-9#yHr^g7tkTuiE=54)(s;8kOkX`XM19TTrZuZ&=CG+0S}@oa$ehW6wl( z9OPvje2ttG8*_Czm+bWm9DimS`*ua>D*eSvnNz4j9#gJ`kIYLc;OM5 z43(Z>-GRYZ!k|D7_bMbh@nW5NRIf0OAjV9CGM5CIqu2X!x@XE~FeJmq+>c|dZ)KFl zc}g4biMD=o1hnZyjWhM)YU-!OfFna(i6JZN)YZ=EHBKjLj(79JV1F?>=oezD$UUeT zQiEBkyAaUbpTmp}{h zsRZ}P`wvllDyfU-#j_q*UH-m8j)6)7X zoaWoQeV!L<51bJ)%V~-Iv-?kho_okhG>TF%KOc|#h^gQyC^Nwz;|yyLW{-U`(@=5S zCqs3WhQP5lV&h4Pb^X;I``zD=?qz+vPrU}gs_ogaYhtX>JC}v`3m1Rx$+vn8`qn?Z zKo9j*aG1YvmkVB+3D(Yu(qwJ8^xURk4I>b%bex%crDZTI1);Dnq(VPZ2LsPW5IGNy zA{-ir$yf+=l)hMj%i@7_9dut373cw#rV4%Z{#M*0gs0Th!PE%Bpbm*kB&1F+BE?wV z_ZwID!*MEy1o}bf6fu8I1{+SAhZqL4krAUzqwd(eaxgG7svFRH#~a_;QD`4A4KlfSx0d%PZ~hk z^l7WvzSUaTSo7?=jK1WaI4&M0{9zca_loKLQldAg<^kq;NV9*@L#jVUOuf3XE?ina zQs57=f`F(jOnvlb3M!@wIy^u`D5T_&m(=8hC6fhBodGTLqe!b@#301}Gtv|xTjj)0 z^{YtsAAV9iP?K{?^F1}CFIL4d!9E4;KV>`R1lc~NrN}7qz;rHI3Yp8Z0YeY>OzufD z5yM06Q)B#Tmc@VXN)5dPz0qLqnyU1gTDkH5HEsZFPp@9$_#yTeG;Qs`Z}eLjJ0VR5 zn#W>HNwyM=j+ri95pyTRJ{8QD!m{%OUWE7A`j7)uc3(c}FvMAhf$JCc%wJ=u!>@6M zJ&WD=lp)JeUieC_M&^_!(e9iQh$m?`cac*=VzUE1Wun#1URx zkKUj5fvdzA{Sn<&RtBb6LTOf@HZ=Zt_6Tt0sV!T@=!->I=b&j}IVc0jH30HyEvyv1`{6zkl(lnDVoa+G+o`^Y?gO zNAiDLwsE_!<2m-2t=$vr=Cqz!&uV+GPqCC`8I;H~Ku@9k)C=9-zop3pF;1Cc{X*x= zJ(#zE!mAIcgEWPYx)>sc%=WblKS)FXYni7sD_tW;OvS+xWJ*voSUZ&D`wYE!1!ygd z^gNWtZ!q81?+KDGKxf{o;sH;;c)-aG=dODs9fCR|OK^X6;Q5r_9r z8!tmP8+Q4wc>*2kM#uw4@0u@P>z8UZ&{SoduIjOf^-LK38m@{3C9W?Tfk`07jN#VX zUHf;>mg7@_V7itIUAp}sBm;a=X4K#V-(Oq@7_p$YH7FOXJwuuLM&XI3|o6yMOUV>S=r zYlX4&IY+lT&6zd1<(A*nB{4>}8Y6S_vn${CI^QnZzpIYz+%IguM%@9$9{8F=GaxjU zV@@g;`@ciTJoLCHu?#Uc%uI%SH9~)_R*zF31m&z6vzQB_H4h*%CHld|=-ED4@|7t` zT6{43_zNY9k5+!`7x`tsNHJ)iocb^#H+j^v`JvxgCLEXJ6zSFfJyHawZ z6xOjQ#PLI>_KcVTA-bUHPNwJ&2HL(Rg99n)kF#RBPw(}+7nFtVkLZ7NGPVc5gL-|y ze*Ob*d+n&LMV))!@y|IvZh3;IxWIba|8)HT^-Zyo#0W;od>ca*#9lrnMp0tSgp|+w zjv9Y?y67fH_`&{tYO=s36ZR4p=uJO+$>fBiN1l<><;2!+;?A)medGnP41i|1pQ5$` z&&q8K(kQI4dDpptECYYFMMj1F{5XFat}y@LmPOpgBmVq4-d}re=Y`{e>Y7yjKHkD( z%J}5)FqUqf6394WI??m9aBOcVVWuE#;y$>jL)TbC%(x3u=c*>J#{EHFKl^c3w92=q zzVG|XxN}=W?OkF8UHBqtbE&62>3453?-FOM|LOU_Jc;xeIn96f_lHjb`GldjtI^YW 
zr2?A8TUTRhyO6HRm#^dQ3VHxoK&QWjCS`{Al0q45Vl!a8T5QnMZJ=t7_h-?SEc2Zy zn7vD5HBr54hH7=Uw_$E9L!$P~-F4O9JX>DNYxeI3;IF|b$gPWpX!$Vc%ep>!44-4B ze|VlQUKIafj-1(#?H{p!jcASxBCQjCBczgByUZIKNb61{PhD*R#9Kfs&$WiBmvKOun3soaO^0c-HT z&85$0XrJ4^oP$(L&YiBq%^!1+cCOF+qH}cE9)`-+!^k<`*yfafB6jOrEDyiLs0T51 z4czL4iJfa_@9(u@E>ytW&y{gUjam;NJ% z9rWq?fMa}k(>IBK>w!A6c(RL@DOLCF+6I?1K0Z6qI$vLOjymvM$VVfdjYaC*QJo`$ z;A_TIT5IZI2<6GBE9M=4C4?9E!|8J}!+E>UAp zyqfU4SZXf>BO;2m{PBa+srxQyoWlj)|20K@t45uFMn>t{&H?x>{&8&n<=VIR-|fjU z#-`}Og|Dv|2OWYMR`H>jQ{dU(um4T~s!p(9`$O$Rmvx6xwl_m;T~U9()+a|Nqq^R% zKK7UEA+(Rz({1Cqd@bVBd6~*{+kY?_(=lmuK74d_z!w@4by1TE913Pg-9OlM297DO-N%5a9y5YZ~uU0~QFdON?FhFLqm&wVB*4%4rko_kO^ z%gtci8B5nU3mHJppae;D)+zA}r2x-DIfd+%I1eRP$|LZYWtj7}eX*D8o8p>sb3ZU@ zrm}wK*z02VF&!RVFdj*po{K@5Sq^rpKLXP~W@X{RQVj{F1HcfXGG*iN!~8JT&P^Q6 zN63de#nIQgdAknTSjNW7J?Yc_en3<%?HHDeM0Spcr?~csDMrMdCQN0R>+KYY^=m)H zi{Y2<0}L8}gGGrtOX+t4XT-D|$*!1@Wsm>k*uVd$@wmTxHQKX%!SUWtw>(BMID3z= zZg=)0t>FnMYtQYQzdq-o_s*%G4)gPuyyMlI-tGe}r>*q0WP9j5?^JyhCd;TPcv25O zh8pX)@7rC{$LBmDi9AvrteJVb`yM~`>DuK4^7{yXYFZ(~#Ej%7P=cTJz;UJDJsXo@fW zg7}B^=}Cvx){)dXw_Y&b{yWBTIq6&218|H>ksme$WqM;gzvzO~?uvPB42b)(>-8fv zdPjeM)krZVK~<9JEC|Xzg|gmIrIkBML3TcM`#jufwg+LkwLm0354+1cUfyum&OY7OGY!t+ zuI*hvaYup!&TTz{tqkQsy66K4dZOMFD0u9D;=M(NPG;}zPjA6Wplek(F(@xOs<1PY z3MTuAn7S9Y=NCpEo?WI3mqYeay88f(=I49LOeTV+&!EKS{tT~Y**W&9GGV{f*Kzd3 zPOsi~Z7#^(*Yo)T>l5qC`7^aOLp)=|60f_>i#NOnb1tdkGU5mb!_l+6?#&C z4bd8yU~N-lpHbm+K%Hi_@wK4dDi4SWdu;hM>g)$YnE>V)CDHkQH_c-{eOU}4_ZT=n zC4&Psynz(uNy;TA#rglhQkM|O9;7mKebQfupUw^jIc1s%Ky<>d=Tod@cdvO?`38S@ z#`P(&=eSmXQtcJ8v2ZCh|W=uB>V$-*!^#RdH9_t z1C?Y1hzG7cr`*Bu(X_e9>_*>~uWmVn_PJ@-!MHuIwOWam_l)mj z|DVq>_G)W6_gwK6)?rcmK$=H4(}oh0#p4PIcF({S`U#omsHv97X6I1MWLYmpsgZ!s zO_d#qDUNZBXEhclXAs<&`tuH``-Fx+WAVR z5kW4JNSG47m-HuM?L6~RJdb1NW!UJ;>;0AE!bZEpxPN^;eV(eMcqE{D!t9=>y4sDj z;?YV5@L~5&K%M^3>X&M(Q|C9m{vx$KgQvX(tT$<(fkYH0yb+Q@?ZUx->>-6uS{4ye z{iO3_7>Sh&ou_A*-`@>H`2-&vxgl&Zi;v!MuR&Fm>-2eo9@R$C_s-KDDF1G!T+2fV zv2SvnBS+*O`K;|Z{CC~mrOtNon7jI#oa(a2;h}K~N;xO+9LT2f z8RcHqlheq3kMs4!^O_ug_lQ~I#FRJ1o6AKEO+5A*vsFaC2gO%R)ZQeq%G@%j7|f$# zBU7&Zfg6jErE~0kC9MCo8+F5OR_!-o0&5!YwdXDPSUa3F3r@STR~+OpAjTK*eskDX z3R2jL%|eLDfUwLd$7yxtnk*IetzrC@w0+4%Y|jP<8)L;HIsJ`)an%dQbJx&`_=suG z>(qM0wA=6OYPyMQpThF_1+oAX=1}>v<(3Rkf4A#A333F>H^TNAo+7S8ojd2q4!`0R&j`F*$!TK`aX^)SF7s;zO6Mls0tSfddHb!FfTsWEe& z=q+#hy*N=T2Fk^MIy2EaqGpdKEL*>=V(M(W+>DlqzW=)SyFZsd!kN`@d) zIKoQLU^3QkzBK$z1=YG!z>D8>UvpF3xhWNqLT|m{UBBgjCx3UI2MihFr#^H*9+rrI zdG1_%?%Yxw`Ks^2SM7E_97dA93!TR=sDJJ#3?E=w(|1q|^&_3E?a_hHa)f%>4(814 zIgMnUXH8=x3fmvV?70S6@@F3y|61$GPI2;;2YY(W`n;DIG!iRY+C+!)z=5t zIr)LXxKk;Ak8i&MulHOt^<2KjWwC06d$)UgW59i;>Ht3*@42oA4z*_??c}ki6nfWF zj#lt*hJ@?CV4@0gq zmp{B$q&F{_>DI4Jtp&+uah!XdO5yV|AZ758lwUu8FZekDKi9+Eq$xeO+oRN8%RWHO zIMaI-ZA5O@U|moamu}u*{hbH++)nV^^_k;24`bopg(omkBd9WD1w7$d=X@?4jh|oO zOOR)P<@Gtod%`dG=7w$7{pFe8Ib+s&nnF^yf2+*H#!2{jnqc2%*m;`3LbmLI@zW1J z^VNNSz9A|fl3+Tyu#4>HKyIFc+`O?pFJes>eGK8En**2^E<28z#3j9aV*am(sr#8J$bHWr?{S6b9}^q6w_OFL+}gNEN?xt%!#Flb@Ib6zCzOn z<>biPVzG=!YF<5)do0dEsKFa<>VWHkAP_k$mLt~u_L^kk`kDClph}q4dGoijgp?{y zIGA{8avnA^gGRqlEuNvw_f+-At`c-2a>dkO%wo4gwEA?3WFDzQi1~_@gDHb*#2UXK>8Ef-0aPgP)KO zAF>%Aj9d~I%=BekzexV9B&lDdOU9?ZhI@E5|; z`#^M5x1P5+AI^Pw-i@Ze<)og@szal$+KEP8`rFSn@e`xAKX-%81O_=deCck}Hx&u$Ep#-q9Y8EVDCTw$Nkvh%D) z27N0613mOZ0wQmErf%Y|TaRCV>#z2UcEmmTS)P0=+MZ9LfAO(9bB(td+2rHNd>pLQ zh&YoZJQStBewP^=y!zdF^!xW=KZkTfXmsRHQ8Un#Kix%{D@QA5>aLYDS{F+JrH71| zIE~3T9{h0gz7{bI-TS#rciDud&T`nuFh6ibn?)~WyX8D{!IKOMFctzq24o4dqH$Yvfv@??DxcSV*O=@7`jAGEHWKktf9wk-!`A4wwH%g zIsTpMlLzf@nd%H8Tqi_-+9Ajq`0k@a;rfzaZ*q0kz6Gm1=Im5!Z1O3O!H>@Ox1=tK 
zQTrz-(MAyDHx(Ij8C>l4!yY}NwZxzIp+6*J(<8W^t6+f4;=aBfqKNC8u-NcclXPu87YNJlpbQbI~Gri>Doys$`N%=LdDnTI@6;L)|aji z3>JSzCL1%T_|%tgYnQ~r>wGQm z?$NK?a3TzU|NV1>r~4hHs@twB`Heqhdq%c-H8|xbZh9D}mY1stjR)JetV~MEU&%Q> z)AiEt)sIW3SP{)9Y(H4a9N8Xx7ts1JX7YbMRIkxN4Wd}_EKUZcSS`XJ>AQ!4{_Xzh zQ+_DBdOV*Qt?l{XoHLr;aO^RD>p?XbQp+N)mcgv+rFFjYGI`aFXnM542+XXGyIsTlc1^m8Cy#%( zo+kNO_kM5vc~8Ay=bkXJ%I|%u(XRnDOvuF!S;He9>xs`RmnLS$VpIDhkFoB_Vo0c| zun8y;<-Q6I4Kh9omxvdvYg5fb3VAL*?!4k9Se9x)hOx8w<29=K6z|LD!KU7MMh@Ef zQb4JO2}}ndjaBE|F!AK->Ps%X^Ll@M@}N6iuhko0J?2u?gS{FN(A^;J7nGRkurfW5 z*1KUQ6;+Zl=o8BOPgL(yM(Um*{r2Znyb4p1*8@*n7K|}o;#~f)vT}m5(*A)DiSv%R zC`QWui+31|ytqo^L`kkr>%yZ0_d4)_X6N|vwjb}j54ZKSKl<;AZPYY0Q7V574^=>D zow?<{96JT4tNlAw>ce^|t@Su}s+=d!QJPdQY{EZK%fjGc7neF7Ru99ltfMTcGn7MN zJp!nrj`kJMJw)m}&UQR+HQ_B=x*g+|5p>QKu4T(@VmWE!eajr$;#jLS`W4hG|1ftI z6-!^MUTSchAF>$gRTw+dh{J!%Z~8o{`kaotj~s$W4whF+95h&n@!1}9rRvKh&EG^; zt|@{jscx8VM5OmwXRF6V6%hlC{T&$3O|efk#l9@oM-JKlnfa5R&$@*xl6->K>uc+! zQ3iU*nN}i7vNsfm`uD-VA@)H#~LbLKAKfAD`vzZu}f44B@3yQ-d3#pnJ} zEqraJLk?gWE9F|$=sfu{9)jMD;`Cj_n}-)>_KNu*J}Zkk6JPg?zz~$A!~`$!COo#q zzIGAkrbaHeOEC7!^wMpLWrFqN<&$vBe3^g~P%vSRy$ofrqR3P=8EO(p0xfR^1fEaA z5}-zZ`pv2D=Td(Q3`+D>Cu{E zUdA{>3f$eJw=Zg=jg3#qcBOK61uiygi&DJ^)A9uZygv)TxHQK8u~Xg4uGb>TvrAq> z6}R7SKUc$^%Mwocj2^=p)>{efXM43ZH_24t8q(%XKJ|aV#aa(VAg3m2=RT)jIvAiY zL}2T~r+!P!89_01&8f%mAUA>42cZrUeqDRtD-TRHB*slTUw2g@umGeO*ROZ|@zWdL z59HVm$M?WYz;M0&Hhuc0MZ_T|L4PxfGEWfX!|G71&{G|K0npqb7DIYjYk?x1b%C7d zox%HAUiyEHwBn~+mrD}a=Db(geURko?PtwQk5G`? z&-kM9Xq8vQ=I-mQ0E~h4R4jf5ekZAKKAV8i`0g3&)1y=WF(7E7UDql*cI_GG-d)Sn zUd+KY|K>G8hkW_FH8m3OVb1FYg`{_x{CeT7AJ&AM3UEn4JA+^Rp*r z(z*9o`PX&a_Oha%!7;Lbr1dtCr+=Tm{)eBP|2?3nR}0erx4$oHghzFsa;wuE-aYkK zJ~4kfCEBHW-EUc|*2jWh|NZ-)zx!GHhc-X=9c1mw`1jxUCjKw}J}MgJlFri-pGBfG zzHv6#7cR(Z;){QNpUawjz6SVS9#P3O$!gAx_JXWf{oD5f<$gn4Fk<<3 zX{U?2?s5--0w5P1uk}9Kp86@yXAxVYvoU{fbJ4ueuM!goboO>9b_9DN>4K$8G6{^P zRGEa*=iyJ+@!Z3_FQAA$vU0}^xy#7$5rj15Zg_OY4CEv|mPOELN*X?RJ@GTLP6zpW z)H7huqUU~EVJ>x__8j^PC*~TL*jmCmr?o~lz1nT2?7`KUazXzjPtOZd)~h$l1;>9} zfEFU?&YHwgp{x0bntF)(FP9~s9x2Bo*X%qH0Xu$u3q6>E$Xn(^@`G?fy6!i!9BF0l zMeJmO_*gP^H{Yqn26<$^)jTlYmc4&YKzL0p#ywszgWfr9%>&d2%;Mra&N+N$)Cc%F z_w%;7hO0j0)CYfmJ0~vvpcfrsvCYi&^N@J-GKW>MRfA~vhz#>W3$_>{e~?!Dd(oKF zH3+QV61sa{cs!rYa`Y2GZBK=aXF{Q03}Ro>Q0 zEWhy-S6~jyD=@_lt&ffPwI^<2CD-lQbN`5Zr1l==_m@7#b6>(Czt(mp^7@%omTtF7 z?1E~{=7SU7?TRn~C8kdmp2JZ6MjET+3@Sqb9&X*t=3?6^Fk>;m)ZqG~SJTv-J;PZLwQ{Mfr z@}f<`+<8S`1s-w;rWnt?n?8L7gT|nuqYY}+1n52Co&Ev)=J4zG zoJ!yt7lY+xbL)AM&$!CZ`D*+u>^8PkfQ;Efpk83F`wG_M=Y#2wVN)p9>Ft;fUR!UN z`zh5D^9(L5(`WSHxy0na!7|u44xMO3vB7`6?KNz*U}qfzeqGL0F+~0p#<2uy?01m# zr*D|)6ApXoIlbxdEof+?4R}w%kqD6 zdF#`TT$0V1oyQZDV(~LajzkHM+cW#iY~Bx}`4lMgAKoMj;)ItaMIN`|?mh7(WS!LA=xh>_ZiF{A2?__r{kz8w?nKE^?gC>^I!6-WNMvcQ-i*x!m-P z+_)m~IC{m6VaxG)lwX?JOvoWryXPLd;+e&^(*OC|miBst%lls&iVgJM_ZatR zw7UQOJ_-$cV67hYAFk)(ui#n@X6^=U|N6}8Gp&zg;al%^ogpjNYRuffyT-R>_)5%d zlKLcn_c)_JPotcQZ9+|VW?Ys~!;-TB1)a8;o6 zZrAGgg<0vm;HUxGlkm=0~t#`+YQHHKDuv@ zUt+fN$B@MCKU`KVmrO4HufEtjzfj6HlYURM(_lf7T{_6{mfX1?2 z7e~(4L!AWW@8LllTvd>VX|S62)s$^l6JHIgAYaVMvGy0DrX-%EX-{`XoV*~5&7DN! 
z7pkgcy$L$s=52_4d|H?2|IJQyv{N5=570W871q0jr)}@2h;a{fmzORb3@#KxhV^#) z-fp`;8h+1#k2+JkZjHXgglvyE@O zDj(G;zeaYJcaQmH$M3w`{Bj7WZ&3aR5NYen{CMhvJ)K`pYZbH-Gz)`*oVgO z2h^(pEB@dz_%XS#eaP~!-5*tLQ|9^r%w&H*Pd4FS&!Ao)T`IEcuFH#ZmkG8F=uL zq5e;u`qSDN$)34n8?Qh1IrEymJ?D5tx9k1zjbX26sXh2<+Pwt*$MF4>)f2|p{&@>Tj`~ODZU@G?}zKX-7henS)F~9D{28%QxkCRCQ_8lOD<7IY)M%c9Nf0C%M%9^~fNv z)Nk_?XRfvWh5z}hACcWdsIDYk-zT6xo^+%VrWt(%_`rFDF*csz?e9PQR;R~29m}?t+U8Nt z&h`B#!%47uh$z0V;cQx-b0CvUL6Lzeb8ZA#ZmR-x^RwV3O=(V%ZwZRu75Y&HN9PUH zyQ9q{T2QPrR1sl~bB>of#*n!hzJH_d7O+zfL2Rh2^QlWtj827Iy^A)q8?3VdtnwZ* zHcovB-4jRQ*!p|o`^GKY3{RH<*(!>3dXgp_OymGr2UNjr0e*0~3$Z-E$aVC2Ksa<0CJ|Ua}xiuyN{p84_m|1 zE`>=JQRtJ-+|W7R2lCmuGyWWw73L?fpJH?3NLx$|q>=?~G>5i+H+aYT-NC@(#DVBtbC!Re zg)Zz_&7B*as(-^9JJ;BeWw%%BZZ`H0TdqVNd@RiHs6T!8Z7v-#hPYTG=E(`FAWR1D ziw?-hZcG5i^2*#rZV+^2ToYHB5G+(g)+RUNp>S&M>GR{FnF7yZPht zn;Sm#Kg)qQR1b0*zF^-buP0hJs(pU%`_wVR+|GM?Vji^pU+W83w!P2oHMZaXMW=J> zz3=_OV%Nh|Wy*oAQK@cqU5Wb5!Ib0U4L;PT8Gr^57|aDweE+W2xOXWXl&ck1@?`< z!Iwnc+TJ#g5UV5VUGyZU{p8$_^UE54zD9b9DQ@|bvtN5C)K#ytIP?ZGR`MyDvdJP!8O^CaO8?-Gr1n@hJUO4eL^6PxA~YH@JxqXI5QQ>993Kg z<4qAj% zPJcSvzk7Q4O@5w2QhrgX^W4+hm$lh@FssR%5q@VAFWr)hupX_G?tXyJzgiAT$P{*R zwfbmcpK)*kgWT(r*F}JUC#C zr98TP+Jvjh>m*r324CrGR*?6{e8)|{9e<|-C#f2y;5v3w3sNz2)yZ=L+TL5N&7c>^ zxfeG$e-`U6(>^!2*0gzyGjpT3J@ZV^r@}JJus(R=hugLJ{JXHPQ@6d4Jk3dr+A1Ry zGit~%wwkC9^l)MdJq)-@^gvk8QBy@YFPYKEkwvaKZ+p<(EY#fYO~|)`Mq-_uS$|Jd zCDU=^tj67VNhXSd$-I~&YXg}O{HQ35SK9iR5-7Pd#`n;=Myqu6wtauhDd=2;R7lja zkm(#fustwho^FL^y>iDCryhS^5i`lqlazeCZAnv*RPQ6V^LP+#U!Ohyr1rd$VZ3@} z7-@lAW*o-;%egWlauTt&?nm8ZEn*G%o0AjMBV}H(ulgdav z-vsxP&|Shv)^!4q4Da*USEF@b)m4|fIvsF!x?YBv$nVPPc=@sA=Gj=j)ePdi&fNIY z`|4XQ`}GX-SLcB~0gepcAYD0IOym>Fzf;Uvfe_P}vzv1O?HNw#YCepe*a=cyKtqf+ zStoML7CB1{)N|2H18x2){(rx4dE94(97D@BJNp;^OV-fahGbYX|LFV;`wl&r5$ZJ| z<-F>n{EIw(pCbwGa96>NR>|0VRAD+;VFSPso@PVkBdo{eZ6Jn#5{IOdPHc z^*7bIN4GDFq0Z6Q!YLI;84{_^T5keBPt(q)4rQh|Sr^Wy;x3qi|F`@aOpBd)Cucqn zS@qACLn;g-Ywi)(fzEz>XG$265&su_m;I8xS7V~|&exKna0>gYnQ?r&p1V}5A+Gg1 zCmRuyDH4D1DaP5LWSr*VNxzPK8lP$rVO@06-q-s}&i%l5VkKRnE_$>7>N@{D%ZMi= zcK#fX&!4+*xbSOwm(eN=C4V(TTXQgt8|lk>>~UO=nL5`YZCvwTuPNv+v1<35k&+7g zUP(6jSB$i~&pn}~2tj%8)YREH+y6^G8X1m}8?$-|Kid5Ti)_?U;pd6h($+$4Pcc_E zKX9$HmV#dMyO=lNF|@rf=icDv+yi0Rncm6kJhq;R*7p{3gbr=bWqo~pv#VK89uxi2k=c{E_a@Y2C#W@9rLa7X|5 z4%vIJ{hd1Jo_i}QqMmBKERsw~sT?F3Ntx6BN#mn1NP^yg@OL10^d+DmSc3MtwxNhO zU7+o2OM`%GeDESDB!3=4-)=pd2O(-fXyEbjb0!#%jfG(e1XQ9n&5ml=+-D38+j?(@&p9m@K1)@HfF`JjM`cBEivDl!l;a z&enmh_%-CAV}G3VLyB|J!F@shKEDQm^4@Eye+X{#NS16u8S*|FA6W%g)9|j{)xr$sZGd%)_+3def~a~WeQMZ7}6FGe*WB# z72)$zLJC^EhZ)nzEA$nt1n=7B5u6sha9kkI3Blr=pnpM#VDST(5)yAmGn@ojF7gNg zLox00fHXrPKm$>rXkdBV(A4NwF*|5a8&~j7c@LOHK^1%#S|i{R{y>~x4u}MecI;B3 zh0=BtM=>dm%P9KFWY0WZJTb=lYilodp+akYGZBvWxTX%_cSD7dGq4c zsDOlJXX~aM z^ZL12o4~r4muc-&?#yq}SuMEj{eGV9^pw00**OVM#|vYghh=xXu)FiS*{A%jKJP>z z6MvAj4HfJNQMZ!`IgQrZv&sve>De^HAH9p`IHKq^LFi;B$!OxrSz6g@vWG*j@<0@p zD?XIPYh3s0s7=$YZhBH#p})D!NsGPb%UyikA!fzpCjICw$^}J+)2!P?iwveA$V(ua zT|+6CTV!3DV{_TWb3*JN9>T#u`QEchM}Kd{yO)R#ODco+FkQAad5_CH(RU5seE3@H z>)*IFzx>5y>=O`g{QFG;!0|`$&;IzGc4HT~jClY0>r}-L?)V*-0018Gg#mzK0WEN| zfl`z^CwpHyM{6SU?DXK*Ng-bl`;GQpm)w@_%<+ zpSNK66Tjt&VHx~AY+^hbzwoF0r#>DP@bazS(12*b1Lo%ga70g> zr@SYCWE&3P3%suyfuI36W(a@{k1%}4d5bL{K;n!4)TccF95J58Co!Z&8Nc@zL4$wi zOP}uv_-q7lF)ZTqFJMF=j_=`wAb00HJt@UT8_;4uOV)2qf;9)F$jjh`Yf z*p3P;uVyi{U;6cMd#e2dr&u0RaXtVc#N{(^(f1o}PsZ|q8*bmhuz~drKl;CZ?1w`v z-WUIEFLIAk+%wL93s{)^uG503x0 z9;O%VpYg!|(tq3g4GRFnC*1z|Yma|l|8MxCAGQ8_e$4vYAE5n=U%=o$=Mz5gNB<%y zkcjPJC?pO*9JdM07=IRvFTWr8L|EQZis9S_ES9HKm<-?-Tz~xS-#?KDTwsZRPlq0` 
zL>G0a%>!fV;x#vqUNiONR#J|-fn?*{o5in%5`p!dwPeBw&m&NXRIL4;r|{lIRBbPQ z;hL!o+JQ3X#B|2lf0l_;Lb;b{JCa1sK`$;|c0lOf=0eC;wa?xX3Qpl(IS3u+AigJ; z@Gv7^8m738V%pes(k@rQ$0dY0mQf$jl zuLec;Gr`qN-6OHQQ31x#yzR}iTe%-$C6|5?heSJDf2&E+f6rdsL?%deMNIeUH5H5c zUT`BQYqF3kU>H5`Gz@7&(6C)xzi%Tc~_d`IPXhGZS1@oewugmS1^4-%l^+S_c7yAJk}nvf&}E zTS7@9UCdX2k~xKYshkmaUppZz%Pw&*H%lVD5aaBzbkWEMRdAjxE?%5F9`U;zdf)-= z1s&1^^-eH3$;~@3#D!)~1dWZ;y2Eeyj^FUdM3*(`f7KYf8D;30BlSroPgrx*TgQCz zKI-N&>1Rr^+xw-qN`$F2s=a9(0*V%xf8vXdxIvDxlOFAhi}vu=vfRK0f7$a~B@3JR|P8GIu`k z?+Et|o70%SN+Ot>wrx15J|JBuOp`L=1eAz49vrOGT0~oYAE>6Dr#S*$HW8iIq;4z7 z=J(d#A!1?uZxv{|o9%=Wtw@|Uth0ou;>CZ-e=R14_L&R!?~^bnuLRYVUpMbT!tFjp zzh`@R0+fdVcyYi{cja$5>JevhjPaadqz~(@I{qzwFhJuvyMlB|#mR>}$;Wlvw5dmw z54!7vs<;n9KL*rDmbsCi-mId!83H9Jt4!?rg7NFeuBHWI7R5O`mG&JEA(sGdxxpS?5oSnsO5Z5OTHrtr5i-|nfC8t~v=JtNZ=zzZ; zb43SR@PJv>-czs_VPqU86Cu=I`l1Gxe;Vlp?m*?#E-T&2j(LrE|bg_%2MTf54dX zc`j{?75k$OKgN&`xTN|NBfE;157VG(QZLm7uXMzfDBK$B)6mXR9@E_9#kDknP^&M2)!?-!xC6mc=e-eYYJdA^j;}8u(9QLQfIFx7OaL6<00&Cq);}Bz< zs<*l}KVr8|eLs%n=u3{aFYvdZY7^Ip#_L5vt2Sb!+jGS^)-W-xJ%0pYb-KmG5&k}Da^PzG$UAFIlxf-qGlolJJe>hjBNVt3! zlzuX%idrP*#jRw+4gVFuGP*L3Z|k}ilS_Cm>8Bt zpIk$cyX>Df%MpuntZNaNR~W~3ydSZ9_KO%=`eXAwAZ(B@Pi@><&OE9czm~P%%eAOV zpMj~PO1?%_fu4?{s@rfqe_OjCQfBK2x_2+pU$H4Xma2*&{;+QtN!kS?k3o=TXcIET zdbtYYEQ*1#dwMW=nQ=};y@1&!q`ZleBK1Wkn?{IP|vj`&SG-lSZe`yN(??Yuk$(Xt>KW3(VW0*y*E=#5b1zA(Ve^vn}#=)|^_o%a^N<_-V^9xNz_eY%%_dhy+_Za8~i$|BieRjwC@)q=6 z#>4}oRhLoq#RbnkK+o>yMl5m|JZ;*|X`UK|^R%#WGj%>s<wNwt}mEvBb zQZFqw9)e!5!d_RNRAnVp7E?2OfYih!5wAg#y}Q6Yq9y0|HeNQ4C$r;t^SFh3uhHKW zronw$1RgQZe8?{s>d&!4KPQ^pDZhkGgQ55r+(!f{e?kDekNo7eF}7U))UQE*mUp$W zrDI(Te`Ym^g`=N?;zj1VC15U8I?P^p6$Ak(=8%htq&#=Qy8-q2knFqL)ZHni3guPg zVLQ1E!oBo7x46nwu`ZTvyW|bP_hdcUPsvPqzc7FJfc2`YU@f7v^{T(kFn$ipe-eT;3M@TVgOz;E($T-SSRGaPDvj`f}DCtuS$9!undM=m2*wVx73 z|DE@{dR`R|aWZuw#c@HrFy@Zc822zm$a#|{l{~~b1$U`%;X0#`G+4k;9Bq#8q}vcv ztbzfqcqNv6>rko?7a|xWg$G8z&a1Xi(7aRhe|Oy&qWvQ+df|mCBOkUt14FCa$3kgx zUeku*kVi2ms(B-QTw432?mr$(QP83J3}`Se%(~u(HLzex* zn6#`r_bk8ugCpCP&cDZy>|blQx_fa}3vsB6{F{HQiY!TB0Y>?p&LUD38K`m_!tZgU ze|aj`WDt_JrY2?__YtS$tm0JPapFiwW>4|56*OG}C{3xBis+K(i1}2i{3C|scuEy~ zaXm-7<&+!O>qSEQMXsbGRCOtIO=Iz2_ZUz5f=vqx^auOa);C!Hmx$9kIf_{q2S3k-O^R>n}b`hoJ6*KKyq^I8;>yeh8|Iiyh zcz_kJxTb);WTv4C!RIkBS9rf?a|KRKWju$%CVOIrWJo6u{+YgQiHf!ps!joHdsSMT? 
zmaneP0+N)uAcy=h6mtq$7S?hXZbcd@O*{(kqEar@>WP6)xS1}Oz)QD>h#8S`BcJA= z!#QMLw1rQyhd8{_x2~B+e{M?P=o2QX*3pn<(be>l!t5oaDr1mlPqY!t%;u9$xO8Zl z=*wRg;O!SnTSsO6WPb^p4#|kT&S*I1i_b7pKb#FG8>Y;d7PYW;(Qh{D$OUkUQ6JVR zt~2dDdsUn9-kIDxBm&*Ma443A;28=dh9ufP<0NI7X3f8o}Cq=YPjKI;O^PYwB9oODUp!`qISG@??nzIZvw&4(ag zX2i(^L?7}z2YqwoY&h92^FP9KgKG{OIx?JwOCTZDs>B5 zJn-t#535o@s#K65fA&PY2bCN$UBpqQ=5FmY>XK7Zmn)(g*X=W`vHclK)ds<`^?2GC zUK<|N>O&~B6VQ2f$cLHD*;A3wUJu;#zZOimbSs`%B3A+MXfX__^o@dKR{F^eEcz}8 zz2+bC=T`yB>vurZ_v4nwA$9I(f-zRFZ6-WE9Cy2W+6f3x`jQ<8%{_OqUh-7?Yk>}PIi)|ff1^J>*9G^6JD=%tJ2y@YLuSII-1>7haoo&LVjG$H(x~Eu=z?-4#NIdy*6T7EI?VYKx}AOn z%h@M0;*BTeQ&_hd%d_j|Ip@;k2Yq-WZto0J{q zrqE}HULc?-q3N1)-El&*Pw9vdl<v)v^gKQsj`I{YsY~g8V?)ODN622iuGHM);CaR29F6Ma_ zZY}qG&=ZPdNfc$y>=L=8eA47p>v=?_i}in`e}X1_$z#tLm(ixTBe;;39T;1ZsNks?1#gVjjDva8JslGK!jR$x!AY z3xd5q)RA%OsIWc6yK5M65WnM1*eS>M-)r0J1I`7<^7k5-(T*OmOC;+P9nkom!_49g ze~Bs>)y0TUZ#zz@^L)UmkclZDK#7zA;zzRQu>V{q25X`{Np$adh&vcDw^)lg|E-4d zRD*8oagR0VE~)s3yGJ4oj5kRiCf7^AoSHt?tYqk4VRVflL>q_#ACOP4D;QXwrXA5` zC1`OO1B`W*M|-&vuYnd+Xe6VP8K})ge>3@&Sq!C8j6&&grCfMePhjcH;QJScXDzIb z(~^Ga1BO^jI?1YdeI0P@fH!9TI{2hInJZ#nuc2Mq8} z&ZoR0h(+D2W1h>dksqH3?~VB|c~&EHv~Oe$WtRl{t$y5kvEdQ>PP}de;z!U+I6!(#4=2A()*TUXF19~uQ2y4$C{8sJ&+iF z!x#L7A7lTBOE%p1<^4O+f6e?*ujMl+TYA7vyU1f?oS2x5zpPuAs3PJ}wxC49FyF85 zFZLF*7EVb&C5L#A*?B0(LA z*iAkcwZu86J5ykM>W4?=x@)cqJOR9D2T84Jt$F4o-&5@%|HAAR`1|pM=U_xBDZ;-+vvcL zbg08X8)j?kPF0Z$J#|mmjQzppvHE0;{fL3T#H@sPspt_0AySTd{HX8g)Tqv_%g3z{ zZl$^BI`AuHa=ZE{R`*mnfi66%Wu9wJy}{p2zB1&U`t^G7f0@AW5)#CEbcDc6QcMfg zomf_7i0X-c-|oO5rcT5z?WU_O@=%s#n0F~L9l06tyjTNeLw~8sI3uW!rAO`>N=7_q ziTd~(d81schnLsHVEzmB@hNUK;#zv#(~7g)P+|ATqdXt=5d829{w||O48$>>x}UPM znzBQkha-rVe-HCZXYcI|?&E>SNhVUcq&0shCQDt#JAbb~lVKeZ-TR&zahyEe?fsR$ zrxwTC#@55_B24`{~QVw2Z2bx`-=F)Mr8T_Y=jjR-Rj58leoO%Uw!MNvIM? zi0~X`>2mVvgchTIhP8E7!_n@cWcIweyi=@h-r}UWe+kJfBnlW16ugkyIt)YJ!B?o) zKOZjd5Zum2o)Wn|s|gsnBMy8ZLOUOnmAx^j{4 zt3>Q;fBwgu^uCw#|}9^FiwVbUPKGw?;A?Ad0QG2P;HzQ z69RUx7fa2FRNQx2aig|!jl9aS|9%VFT+N-Ce~D~-3UzyUiSzl0d>Bp}+q#ywxo9x!M< zuj+2@{ppHAuma>3IdLx`HZ()-0S|Pwg1&wT_WYjLGY$!2^SM~4ZXIl)%@Z@neHkq+ z=mo_J9vf#H>(TZ>UH;^A?D*NS8*F}ie}CE^Ud9@;-*n(H7jE-wV+~btnqzw}RUY}a zUe4zJhwtftqdnC?*YUm0_hVgBpK-+HGsPFaYI_33v#$_qk_Gd64e7R0H`3$kTlfrq~4_*($f()sa_;?~t_njK@*3n=S1g0htTc8tZ=|f5Oc# zX6w%6hg+fDj_b-4LGH>>H&24JpDQwT++Kcom%^lO)~tBj`nKjxaH?ik)be&IFXPCs zoe4JReb8Jo`9kKZarViZlp(!wvs&drtwSsxv^a0X>F&A_r|T-j*Ujg{l#X?_;>ag( z?=Js{xAkK#CjKKnVe1Bm^Vs`ee{#Xw@;cb7mpDZ(t%IGF%osBX&3o`*$sY)0v8&W$ zsyYH@#)zI4MA`g(djK~}6Gy)%A&6(h6WILb&{@3>$=TXI;-N?R znZWkMnO7fbQI6NH;{AcYp+C>hlvmP-HK~FmR%kPbHR7Q;aO657e~^fIvXmxE0N4H8 zg=|%9Tj`m?EUID-D)WP-%t47cJj}3;>*e*PNm!PP2WXfNt+!~WORn2IXEU>4**jHO zr{I#6)`VWJiv?*o?aA}HE(s{aQ7k8S=$3U3#G3e)7jxhA-pJRGo%nx`vAqKy_D?Rx zUO(tWd=+GAKb!L^ma;PIb=Ez*N+raZUJ}*4#ipf@6r3)ui)gQN?|-Ga6x;4K0HvQZ zW&JsCGI@swLwX>@`g3V_RzL9j_>3{0DIIH}#49l;Oy2%Cp7o;-IQ-NSj2v@b=YbQj zR1Nu(S6ns7uQ%{Y-qR{e?rRY$kq<)2h-tMdI7y>aX_ho0C1vitz?3=JmutL$^+n(f zJo9^*P$d}C++(U+QGeaUWaw_vrSU+|d`iteO2rTkCUI36c*(5xYH3I^TVVgA#ms@n z68FvA=1a6X@Pbb1v3|hD36{nFkfX%DJVBL>oF3!6|Lk@B8CL$c9Eq)^zdG~DelYe! 
zK|;*=LX#km{>Z6cQc`K3_r(KKBH&y7>>3lyy==vsHh5klGJkpYAd!5(FRq!=E~)gh zr>E;cWw$DEpIP-3rn>Q}5En@-G6z-efF>|V0c$#1E zx;)3liD2J8oN#d?j!RDJ&hwCu%lI#U;NNWgRZJ$BXTkmG0cZoUhVXMQJ_4*g82O@y z_;)UagY9&-UaBKD*zzgt$GB{Hed1IcZDX=PJnyo23V)1<=QP+vWOh#m^BAjkS8gva z;*@V?Ei<_!i2s{QxMz^e3fhY{qBD$%x)J+)@p|e$^2c{qPoL+0XX3J&sHyuB?(Z%a zrX!&8Abl(oa&7Js(n^(O2RwX1mYC4j$e;>HS!_7A3Ur|58TKHobh=M`3%={tnC>0!F!uW zI^#t?*k^ymC6BxT>m{p_CjzEsiL8WbpJduSMt`aL3`n>1lnDK0=Eufyp-S=KZkOn8 z!3nInK`g(dZklZZI(3LP?ZzB`;#bmGPE$N2dP_xC$avp!&_EskFN_*Fn zWx3`_xn{s0ML+%W^rh>ohO)F_2IQMadFNl+_~SO{Rp2Iqekqq?Je#KNV`v%%O8HQ) z9};aCyjBADb=TUQin+?xeI!VY$uFnI?=zoj9)d3BwC5=S%>t(@xSk48DSxt5JeRcF zio_reDnra1>ISL;0m)9>%?(E-n`i;# zKJlrIeo>TAIHIVbMGv9>R+=QXEmB~{?8*Psn|wYgOOTj9bq5<{`_`UZ6*xu>EM zPLZ=pClm`~YW3ni>VGGyy^L&2C)ZN~reCT2<;^u&;awBt$(_S<9!A^l%fE`b7@9^n z<2h3PZ#yFm@cKgs->`L)dl_ zb2!gCo_h=`S5GA|ofcWOr<#Aj8wT*oy)b3srn8usLby$N&wtV1&89>&8FJOe>6M-n z!Xu`;1*q~q-wjW!e6P-8bH_FKFimo`_I8EiktwOCZRVTeb^%$MFRQ@4xx1ea&8w^QN3btG z!=#AljU2p&n#hU#L@0x?r@s&iH13up%|;Z(M_4H6Li-`J|rqWLdsozLi18=u&2)pL*>( zxGs?~`B20kfRdz(g?g_8S7$DAqp?DO$pm+J;vsLe&VN=cvHT9Ceb+AQ#SssD1-e5V zqmR!`$LA*UJWl$jZTmlBwtjH1C+@TZmeVi1q+PN;yI3VWWp9`_lp-L1$7Fd+wGnA! z5R(-Rvky1PK9(dCzMgVI75N&B4zlhxyq-OgSHW3Q%4h}uz!@R|NK|8Psn|Ua(@ug; zHoV#Jx_`q&rcaY%XX{m1GzoO@j&{Wy4(E^TJ9Vg68P00PQCH@p&OG2C#`Sm{s~?+6 zm&fz#ey~LtqXR_fK zm+!m#XkUCi@1d1>Q8GOZm|oxJbS6Q2zn0tSl7D;oJSE6SR3@$#3DZ~a)@M!QgHnZ|(C!_$jL(S_x{JL8d0!UKL>G)}L7!fm zK!2oV?ds_R`oFG1zO1a~JuK*@c=ppKAo0}2eFgcOo?aV2sMVS)ZRZ2|KJHD+`L=s! z{p{+#`nUok60Aq83Z~6it25426S`OK zoaotp0bE3&m%-3>Ov#37(?!S`r=`nj@f`8ly_?RSIAD!0>OL{nHEuI%6iL9#FrV|B zQ^)eA#{C(-VklqExWUiu;r$_weD7+EBbKj6f9q`j_)j&gxPN$noT&Qb-+!PFb2)xD zbyV+^ZjXzACtYL3i%nBfQ7+2)A#E% zQ#Wy?3GxNQ(vI~Q+xe*{KicX)V@}5!oFQSy7Mlr&tzwQ5MHD zTO@t#!YY5LOhW89%GjlN4$0JAfrr(n@pMKzC_K>jU!b};Xr`&>zN}rnel8G0YSL?q_j5{zCv7j6N$Mt?!^Pz8 zX1@lq8SbDWsh_VO<#sCI<7269^Wdo_Q)*a*;d3rfu z|8h!<`v$7xE-5092#?aB5NRtgcM2xqg>J}=dcK%LcE?RQ3F_=CO@M)!&WSyD?sBfS z`WJoGUvXA(s=7oN#MEW!1J;+TGWQT)l|t!P7a}EX2u*oQfq(m(USjM-`}#3nCT5*g zMY-X~MTd2U0LdXKBeT7r){jRpx7cua^{Ye|j1tzJTgB zO=Ve&H1=H8CV!lkI!KEb_hI@zr`LD};hB&2%P`}HRJEZGW$RX5$dt}#pGkYz0M=-! 
zj0{fuH6NzM+~G~g#OS*zXpyJjS)<(9xa>1WoYwx|d8&TSygmAIa*XvH&jUQ*LdCQV zqrR+|add$AC{Qk#;x(+hmw?HOXmH;aTC{*D(zX4UR|a9EhUZuQ#=qe zTYzb8-Pme*S1hQxC5rp37l_A<3NQh!q&U?(EiI1@9?OE?KxOEf3m1K~#8Q+R$0&>B0QTju-_JGex zY8+Te$~|){_Ci2gZi1vpMTKYml!MTk)OzZ1s(&NTL6@aqoOX_<@LUDqM1);DL3Xq3 z&Dz4(to;M9(X__=&G^rr`dNT*@Ndbqf#W}}Aw`@NM;vpY;7+^WP&!nqa06e4t<@qS zyx>crZK5ah9bsVqT3u~eCFZYs_QYnTYCNcflrF8Aj{BQs(jbqU%2gTHLaeAI}wHKx6)gh^{#I{@5MY!`%mJ{x^k85 zU;UMOT$5|%YyI?+%)tLO(@9Og#gbRoLny?cwr#Cc;5p58n%xDP_Qj)X6MST^5qufO9tIAMh?N8q+%*tMUKbMwbORFjJJ5n{G~}?; z;3|IyRFJ#LWDT1s2<0;%8M#z&34bf(j}SBb{td=uJ2SqglKj%JghxEj#*QalWB(lv z5~5q|&!>h4z6ywy9S_C;eqn!n2sc19T>~H=Gh`3~K7Xv?9&`cJcr&1iI2tyvDIfyB zKRnMCRstjh0F2Ybz&GqVaE<>3)FGePpQj9j7QcYtegyZh>cBVR7U$zrfmVVFpFf=A z^OYLb(00I0@C7iVOM~N~AT)9|exEQ{7Z8Gr$aJD{{H#1i@nk4qr5EAk?sbwFwHZJE z^B;FZy4JUfF zLOF~zM)vs--DgM2u2Vb@hb!aJp@%B_h&Hf}BF@I^ZPzI^ zULS^@B=&!L8AaS1uRC3*N_*X}hn^C9{W*$wV6W4z!*M>&e+O%wUM7VY;Hv}o5%&*V zw-Gg4l$qnYbYDo6yLv0=F9Y}7>X>K)pAnV1Bc!%tdJ8WF$_te``T$Z^T_y77v!l>K z1dH&N12lQIiXat^(8&hoI35hw)C-})qc}E~9?^dU2nW@KVG;3+(l1jF-A3&2UcBoR zXzZV;haOVE{%xa(Q$ewRs_T@r*MoHE3ANW>qKLcWbsXQqUJvQelfhnpiy|(@>o~rR zy&ly@FCriXy)w~YbP+*OP=b3CDfry(y}0%|I(^5zRMNpj!%8kNmtvpgMkiaAOCCnm>L~##(u|E)(`Eo{9pPpH;{h)hklONfjk_HMZy=goOVk^ z+_bHt4O~SOW`dXhx&IM>y5gu`t|L0VU(kOmGp9x`aGd|+K7la680U%=d;Qt526ZU7 zb*7`5_;_NU`aqrZ64p(*O@_Dws_7hnRxmxl3_g+pYsxb;Rzs-0q3l_?=D} zP#+Mf;d58l)2*N|tGD=j-9NvR!dEiHzPLi$qS&erE`f)JI`TtAShT8fUrkn#|L>+d=jGB5w{R3=fOa|GPF57#|q_l&Hk!yuV6bq$J~$xbvys) zAVYnPb_M;L%n-V+MmvWz;uaYGMcaSL8)UWm?RhHHMYAXZa6jN4bqlULk`_8T&gb5s z+}xeF>7d=J8Y7v-Ho3)Ti>o=kGK+dZSvBqu(+=bg_my$3JOgf0L^aIN*2K?$uHo2b zJKVF5ZArZ{B+Y|yUurBMrMQyYqip^AI+tH!0KE|H6!-mT1C!sM`;7^DbC`d7g0_lY zfBbC><&O+<=upQeR?Z)c>%>JyKGRjM^}=nwx+M@7^&jW|Y*&4%f3_3<>3&;5Z#+9T zA8@ZOtJJ$B>lKCBD*e?<;2G*yH4J86cYn53-evHQwh0}~eE15nKX$#eI5z*SKVOD4 z@~dO0!07py;Q56`Z&AC|ED#Kdgou) zid`Q9EI`g|dI`+OEU1SH^Z5+YVgY?w$^N?EBcx!FB+dZTA#KSf_AkHw*&+ zaq)FQ@wGd?PKN>1;qw|_A3h)33w*sE+mElu_T%fZ{rGyo=Kx4>9eW!4MS$BeeY4L2 zgk!sh8vDf$?Ygo5nQhmN{daA<&Dei3wwsRqAKQJS3@vE)qc?wiK>KYL-V(s?mMO}e z`RF*u_QSYF@jG8e;XL^g?cr1VN3@BTr}meJ_T;C1f%m@)hacVXXl2{^=(yDbQ~ONm zmr*D;elxbeB^28}p-s6wwY`lZPJY@Ne3nz&XZ!5a^EGo}?_GZxW&4W08eD|8gkXD3 z$Mzi;AJK*qd+&c^TOsh?KW*`um#4Pe-gh_~N0H&ZC&zKA@VVLSFfP;|+aJb6XuS8S zZH>>pJhct&bDY|CU8lRz?~K6nZUqM&yBM3nJ!_STj}EF`2eQfh`p0%m9l%)CfH4on z$#(Z*;JD{q(;ncFc6%s6!+WQ|S0C9HO+L`=@%<;M8t;GbK2RTBFa`nz(BZuY_(W|! 
zhA}ln;YWk{a7ZHFKv5;Qf7Wh&gHgUUPx}x&k>&N20u`(1(AI4>U0Rabb&D@u<6`DY zRNMUUJ>bb2V>qi#wxfh&{`&n3{7JO|zTI#NJl&I(@_;A&e~adk0$h#J;08K2xl<8= zhsosbdDeeIh4I!1@LW=(yxYkH>xo2;-&*1Kr~*721?xCIwReNp7usH@pU8joQIN0i z$tB_e?D?GIzW{uJeI;b1=&51E4UF$?DEsWX#!z_i^WG4OP>|z$0#GA(`1D>83h7UK zqJO{6RDi+RpZ6LZm>E!b-mVYuq{skX18YNCF{?8gU7SaV55nHyd@ih z&r^TG`tbXa9{V}BztiFOGQ`=efII_t@Vx_`hDiP6z7alOj^pEMxHlxm_m9X|Lmf!% z_c&~TykUQHW*zP#q3k`4of5^pOp6dh0vA}Wk{@Fg#_&fsE1}<`m@h9KF=k-U&HvER-XPJy56g2VlCJb`YULjW@M0ay7Pi_JLmXi^)p+56x7`u~!v|G(z_*FM63*8kW4K5&!YK|9^ZO^S2Jyas7AvXI<#u_<#N8Z`{AQ?7w)oe}4xt z{LFCmzjjhE?EUv$`{Vb1|Jdn2c9j3)KlTliBUp#F3pMIAA z@r@kCX!qZE{cqgzXO{)Tet&lRuRq-XPy4tSfnuDR)&FnajA0lH;Qt4I{QrOb96JA# zul|Px6YjrO^Zq|u#@{9`{iC}5Pjt_4fA!0l>5ptvZDwV-zgKt6@rOsd|NK{A^gqAn z?Z5a4gyD0>WElKUALbt)%tL?swfLt$=l%;K=oz zFGYV~LYqzh>K8=}Tl|x+dkcTUKlw$q@cXA$)IWVRca~@V`s!gUe+xF_2&_;G9^=@o zXbU0ZBE%P+HzfUk$dBQFbmjYx{{4I1@#N3A|F8Iy-yaWa{kQ*r zshYPjJ^t%|IkvMr-lhC6|Ly9o7$)>i-h zpKJT^4&{GLf0GRVp1I4mZjS#TA?UyFTmB#CHvHEjJ<|X9y9nNMRU!X9OZSq%{^tUs zCH?;*XE^|4n1A?XCgy5};ppoBn2EnB>aP53`Cu&wNRY z<4bx}HUv~rwMssG+K5h%@f9*!#-8vsx{ZJ{>9CK1ZCS{91eW~e8Q1v{u7z78net~u zol(UX?q4SEqy9QZoF^Fn8X*N<2Q7_lyzbMSfxmvYb`uWA0e>q_Wv!cjGj$x#ne6uw z8_l$E1;4;aU3yJiW2T8ON#eNJ=w!cfk=R@gY&a{ZF4#Jb2qWH?G;pg02g51?w_W6~ z0(BzOwU5?k%(AA6xXKhgtg&=Ucds%3g0J(Uz-vOc`39>KZSDXCEy@DYXroD@h1Kc0 z&hs?G5g{QYWq*vd`{ipS29XIlJLh%D1qSPlneNYKhte6AS1x~ z>=M0~>VC;ggvXhS>t4z`t6?N~e7doJWs^lHEesG}_iFq5Qxu zi)o6Veqzh^qq{J>V&`^w_EL!Hf-TLtzy)hh`t@k2#A(-biMniB)qiofcoq(I0)KCN zbl}BZT4zykdL?eifk@yJKFTlYqwzaKXpI8~(*uH8cyu$8Uh53tJeUw&y{&Pgs?(M3I(?{+*Cnf2(3c=R7`%(X7K zDj8}yG|U@QC5&9+?a218usJk^MjMjG^0La(5d5R_fyTT5{rdbT$D|$mdTGFazL$p? z4t;;B%(8dCxtfyrs*&%P6X_jsxp(+eXg&Tig_%K@s&lqy0)EhN#(C}p74=AqCul@)o zwy8Z4k|lb^afs#K&Hc(fBMg^r zA*v{wNk!B>FS+o|it~IF?#4Pb+z(a0r+Bh(_S0a>-)m{UZeBztKi9U!cHeEU?w)_u z>qAV;vHbQ1+^!@3blpbadbt%~E*xZBi7S4Q&2A8@=3<#%FOVM>d!kgIHd;5Tdg!Lf z#~7;}xLsHL;(j%wRNcyEX0D(ttyju8ptRym29AcQ2U=@ogW)rH!o|g!`prf>jW1_#-|$O*1x#2&yZV}QQ`4k(O}oZzr2+8* zo3mR(^4_;w@SOMFF2m9nEo5(EaW~Z?dN1IVt36&Rf7bTG@)U|BeG7%;MCos${Ek}A5-+T=El<*-^{oE#TB+0sSzu6(sMx%c{g6IWh#DERA zou`igsUE-GfomRIbG5obUqWiu~I$-5Bo9FPP7$kbM9-P9J#G?aAQ@0m-OOgkPy&kuXiwiGg zBF7!L+~lg--nPB#=2ZI~Z$EH+)pnc}1A@(om9d(%rg#t)Fr9z)ZZ))63&Sv8bOYmR z9z`<+gb0^Pj$f~CX2CNOMF))5?msV^AG>)>okjSCf3Oh(A(fxS`D~T4R?PlHN@^uv zIqjcu;}x}T9~fn!lj1#&PsMc++DVeW7?jC5fH%!q=ARSQkLz5W(V za%UvRAF@%w^e}%|UT*YqxW#`=eK=L}UQrtiFqPm&$_31V)EmsHX;mJER-fCQ3uCIU*VSax->JZ2;>vfRTS65*#fl=RS z#qB4z={*OiyB2jAY{)s70uIDCfu(h&yMbC~>C9-AbhKZORb&d1IMJ+B`q49#P1ABv z6VRCEo~b%xEX$;-nAtn?*mF#9)ujaZ9o%Seej#9kuGiy%26!kD#N@p93bC_W((4?J zTwGDm4=;ao!`Ud^ZAaeUWs?&1JD*k^&3xuz*k|Wz0T>sj^6_{LpdkjD$r|*n zZSvu^ZOip$IYmB)>2%^;{byrq^87+EV{;?rZc=}PzTvO~Ryv-T$iwQWnLC=wA6L{v ziY+R}uUmzz^Vs>gkqK2sRk6<(W&{qXj*L$1w6o}ugn2@$VG@xPe$8*tG$jm@qYWGD zz@MHI7CcuN{LyP&i-S$KPlPGUG)qD$WZYGfsTM$Xes(E@oP+Z8KRwT?^H%R#&63~L ze;9v9?(JOt#9t{0D%6s|>C$X@vKb$X%df0W&-IVUgf@lA5h_ecM_Hhr8-E+n^CD?m zm3%yBkM~`0XUL%cXfpzF3lFc+qrg7h2(aeZFY)XiJ4K&1;slONx3qYHMCaYhdpc_N z)grBzKWC%7;+FdkXulV+bvQyxab5wC=!SoL!S`FN^lL8$)xq2gUGMJn*A;c>R(12$j76@QRpzCs>mCM#Tdj z3X+ff?BX!>GI0f3I+X4dKa<;ac4DbkYPM?V7~tE}LngL3Na#FAavmgp-+EyzEIevl-4MI6zRt}M32tbJf%!aBY8H|QF{2T ztgla^OvUuNl}_B`&#w@dMa z@YBS9l?VwmmE+H}mogL?jZYt}yyw`Fz(C%T@1vYzbGLsIfEFAE zUZe`XqRk=?p^=~VMjP|#Emp_cAdDx1Mr@zG6*(8tuRw5}1QzPoXHH@i`Pprr9mmm}=2kwhoaE zVJ3RxHmWMaR`t@t3U4fK&}4sceaY3t0ubIe@|Kj3L8$ocV-I=l6mwL~)0Csm*~F~K zYyl>v^4l1@hGYC%*tKxhT1oMI9XF>DdjUUVBFlAF5LZMnszH7^KQlV9%+^MxPKUdi z0Oyj`*<3+oJu(BFTRAdB9ATcer{mD0zyVzla{(+IJ1!pq1{I9~2(Nz^&B1doh3jzk 
z(-wL>RcP4Qv_?sw5PezLtjo_{*&0LbSbOmq4l%{N;i4>Sr7C&QCnOW|Tr&2$alY&# zfk9VHd?0edR66ddVrvrI;9I4wP*IwvY%+^8bd+=R+_~7FB&LxjNwah*0IsKo=}Xqu|9@%4jf)`Cwn3Ozu2p@Z>1Pcj}?SN8$tQ1yrd^cX!# zg~8_JcGisXp}2NM8sjlDe9tZR8`{r)#cbJpn%0mvpte7GwQstqc3qF-sKHPj+--lK z$l5uG!ySSIk$Qp1V;J&Z9QCrEFnEfmz)4nB02?pyJa;DySn+>4zrGv%A>WC{M9-XB zTo0n`gcDkQ{0_9FF_eTM%YM;0w$++dWzRKOCyq*LbUuAiePY(Bu`z%O&+M69s79op zvnl1!l>pmzHJMv0KltrxGp#PW6HYLJ(FM6%8f@KHg?7ReO@0|wYCIh&diHE}t$b$2 z7guW?^2e*Fd}Xml1*EW1=qJVGx%X-fQzCf$MZfO#~@^Xjf+-+HXqS( z?s0p$R=TU&3YnjyRvm(K-c?wG{eJE;4U<*%+BNIqRG@!I|7j0t*WVO#%aV875*@R; z$*^JNAHU%Mxr@!q>F`?2I-)40@{s84)yIR$F z*UujE7o?y-(e04lO_t))0w4}EIio+@Bd!2jJv**nM<2O?cG<72kutsQ)fdlFK@$Rj znDeTnvm1Z10(M&owh{#nrr&G-@^qi{$+lmOMUgT0mc9VliJTs4mu($)gYH?E*0hxt zO#+-K2qd_J6{3*LkAoF_;oMaMe!au^^#_lpW|mHqZp<|bt+*9Uj*0;$IYj}1(+XDp zb19BI_bNkm$&UoZ$)bf5u-${PP<5Gl`eBmUuBCryJ>+OoCV;_79>U={>$PoP&Sbz~ zZbE&-LyyA?3GXH|9ajPwXz|w%GALV>lQ!0T`KK()N9O|h!X>BvbrNp^ZCbnd?SchzlFjUn{) z9WsC*j)ER#i(xmUbW6{Zjo4%rq~LjW!H<95sr#_Wq}Ah_SaY+xpB;#P$6<~Wo=H9! z9^JUop6tu?&yFAG&1e`Skv=|jyy`BU`Vsh^?zn!RONjtTKOh*;)Qi?yS5=gl=Un-S zprj}n^^SJzjrmkiY*eEIt66wDctPO|E=^?xJaB6MdJL(W$s7pUyk96Us620>BjSH& zw0xO6YrL!Qj7_{p*M#e0XP=eU14wS};yk?e&#&9I>dY%nZxug=56Aw>4Q^bp>2 znf?B_Cp9*{EhymBkvA_?1hMJr+pmI%_~QVHN> z6|OTJES4^y!e6pePh-Xw$GNuiKr{Il6YnT;_X^$EK-yb5oRnPo`;c3JLfU_-1uPE+ zz0#ssJ$qITg7Vxwz1c%8;$z<=Wo@i0G|bLq2BwOD&ASc{dg3K#&%9(_0qiDxFu80> zgS?+cX?^eDJUiKNwoO(bWSNYt@gR(2;Puoh*ir2d_J|K-V1f;}qS1j}u+F36xNWWb z8^aLJi%>?bXK=9hJ7S#isvdtpT*sv&+}i^XUe%FjKv;C-)>Zp6HTegTaH#MRn`Bs%ZTmZ2&~*SmorWE@3I;7Pnop__CrthL zNHb;ROp;P1d4k=r8r}o^EImN;*3>|F7^OhAcEmJ)#4o*e0)k#%Lv8zFkpNn3NAJ&0PItCR8ri z=2V&Ub_>a(MRVo^{tz0<@H1XZ(Y|q^SytFz1D(ba2oX~Ti~cdUe#(eUMrnTImx;r9 z(iYyi>F+J@lH-yPxFOlSY<_h)Z=Bdhy`eV;2k`N0NZn`?oa_@0ac`0`MNgr~4QjiV zZh=x%r(Zpwr3!yB8@0LK@mB1GAW=F0q9+a@jUjyUf8#E54Pz8Fq^frq~p&%){)JVFzO3A^F zwhGmGJ9-S?FPJw4c-`Cbah5(xQy z5v5^B!t2rifFoXzHEtsKTMTZ$m+U~bgsHgNmecoA5pqIw?SKtg@FgY-JMpzmU6bzU3bfLcQ zFSv8jx_SP1+9a;1Hvk4g3_x_IxHYo!qh=y0m@R)3fLro9c3I@5th>bJQG0c#H6HLl z4diM1h7yR2`Z(@Mj=$O*!8UOk!0*+cZcmG9&Hs*agbYWMiB zT(`9>52QSbu`E~35xqyzV#+GMmS6Xm|Ai8gpFYZy?2lg1r4mzVF|+))cCY#DJcz!- zwr_tC^j5H{>e~lo__OPBxN^`}O^WI(5ui7-U|-!#-0R4o{TNild!1D^B-SSt#@ls* zQa$0e6Y$n3n1B1g&xf)}X*?T}$!~ys69O2R#B&S!+^DbsFR18%N zcz%Lf?N8?vv|?}x zfD&_`q@O23D`A0!Ky(o$fYPY`yPzIg-FZ@1AusO~NfpWHuv@xT2`Fc`2 z5gev}_kIT}gHFXBntu1%^;}X)EhLp$zh~MUi`;_fC7m85-nUzK@52aY4M~4m=owRC zo0sD?KSjF7p5}W=GMVJE?a4=)6lr`lgeoMU*liC1NyolE4^mhOjTLgZB)x_7cAX53 zoX}Ug1EC^XyPi8(i{>WKg7%w-aR)Dsbk7D^JMPJp31M$xDA0k$OOHG_Bs-E5r8FoE z4hhdM*?K=m^L^sg;^))(FOGkj=TONkx2+%Dfd1rx=-A=(JNvg}hh~%rN|N*m1XvjA zSu6^EnNI>MLKaJ=64qjmyQ_srnB81foRBTXepFY-X`|2j_e_xA(TsWo3PFYdHw7M%O(FlK%bX-Q4hlfI{ z1Tx_q^(d{z!>NPBW!x z4){KId;TPP28KC4vwnXtcysenzpfY{B=Qi`CVo~IwS+~AD1CxHST^MAFDYZ$dI0Fz zz7b3UJAXIWG)3j(%_Xp`q45V)B4(2JYGxQ14=nNyfJ!YT&|KUfyUO1I1T*JJp>RFQ zsW7UU^T-z_!7XOKwPkswW-Uqyfkk^0W{=NK`Cd}U&-P(7b zV*u3|lIENpLZfM=%&vPTjQSgj;E8|Cd1ZicJ6Yw~~@hUPy%m=G7@0P>O%w%M)pE^2p*RD8Z0bpf^=M zM$%I?^+e&KLgwpr){%5UZNqu=%$PtyQP2v?pOSkR%7A0~r>4`y!6vL(uzIiXu#WjQ zP!keO=x4%m@nHehIo-ZU>H7o4^4s%d{BGjz{F5-i4sRJY0h&%ML{>a&9J)a$i{3zG zX#)MxpbLLH8BVx2>SpJZ!dd(SH6>tFB`>&H+k`jhCNf?VJXIb3L^SaHV%5_sx|FuB zmXihRC>TG;*sgwB3$~2FN<5U4-Vl~$1Oy|M0+}wjFoGn%Ok^{b0)kl@?Z+s%KptK^ zE21e-;xSfp+r%mTzJmgo!{P@~!*vnT?^-NgSX6)J2d;SDPeI%XZUgFeL!LRLJ6h^m zGy(v|liHGfY4>)6K>G_oukevAxKt%3k|P*Wif(^fCQ`rCxSUEE1a#3YqIoXO8D?_imWa-2A_;ot2V^Ly|EkNx>In=U zoPLsrpZ3z2P!QVd=8T;ZQcWq)4qR# ze~@8f<**Sl6+TH3&H!1p5X&P%vido{FlHXG(XXnX)a#wQEft!$KQIfe1s!pHlGPq5 z%fu`zxeaP_Jb3a+q3^B60%;4%Z%65d_qCd~Xt>A#H1UCz8#hKAb0_bBX?Cgu+`%zv 
za8q?hfbm2331&Xr5)7?0e@r&nFSma@(@{+EIrei*-?Y3BEZQu5%LpBPJgg6N{5nzz zKiU&In}#hF?Qt9c?Z>YzpjgT18vAM0rZFD^;1I$h1_WTMdY>@LYM}FHOpa)B-rjJK zr97UmAjw3J#o#iihi1-O;f?>AcZj5(?q)cVVQQk~Lsu z50mx54ZgHwHHxuLc||aIV?H#>m%=&S9fdIlNojwS4}}3Xf0)&QrAR^m9#%pqzw4q) z?7yXLo!SFEP4FicdZ$t-gRl&RU`l)p$;iSn*+jG4&z|)@xEDO19KRrBYC9xPJa>yd zq-+NyQ6zKb$=R=nqcB(dCbWORJtbf^i4x_AyvM1tX|g(irN-#mfA>OX_@(bJ5r2XM z5XGfTbI5yvaZCI~kxD_yF$HXLvV_7(b=Z z*a!4rkyHIG{IN6X=|AJA<0*7!6#-!xVVi|w3tk>#RJo(xeKl#}Sr&iQ1bH6MbexX} z!kG8^JP)RbwEZK;o$xnQ=_VP)Q>S0(ZHYD%1?5?qz9kBjU=BiWG|s&OyxWIrg!Q8G zKnJH78si5ZmEVANdDrF+S^}BIOBWz}da!DMhKQ8s z1~=x3es3)H2+ru94h?^+H5nvzEbg3*|MtGNJFB+bt1WP;VJdjYM!%!iWNVGKa3@u@ z232TzK#uq&jdQ?Bl&B937KC#mb8HfApXXvaS5b$*$1x1E(4=CK}=zaSK%so(vWM>6lX^L|WU;6AeDG-_*G@}G}}o<3(j zl@s$kC{WIpK1EAS>lTUxgW5g5O$S#=@<9F$Od>Sb(3gG5J_352D<-(-BX!6K>YGxt zn#H^TML@d0q8Hg#34|qoVP!msIY%v77UfTAFkC&YoK?^!{$$R5XVorjCWM<9a&Pn78w{;-Mms@2SFnVc{ z6;fS2$An@Eup8G_#})ZG0)$2xgCcjd~?e>fKC`Sey+O5JVR2-)>aj5rIw=jh*ov2C?&|G zfE>q4X0W0lm@T{d;5oIG1NZH4ey-|F%6o0krWvx%ZY6(VZ&vqR)2omeP*MWKN6YZG z9gBL~(^(tJb3Mle#d4`@9zQOQY>AAn!SicZG+OvzYN7;CEKB~REBzDdk1>hsQ#wB? zU8o4x`sY%3_`FKf5Y%%mMMmhfW`tMIG*9c8Cj>9mpkSI4x)`@Q8LnG$?a8cj+jqg~ zw8Zo6o>YIPqP7GRz#cnt+UYYhGQ^j=eCnoz0~aXt?8hc>!53@pRDdJOLX~; z73;-`yKpuoa?Ml^Yn``snNzRx$O7<8Woo#qzJCmcb60Qbv0{?JtJGw^C|NIaWE7;F zjF$^DKZPhvkDoe!)!`R?K8M&DW{P?8Mb8tKa5I0QP8d;FM$M_~gA@we}dUt8_32_?CGg4flJ z-j@8(DSsW^@3~w1t}BdG!nf_gGPH3EA|_C^^fKuXPu!|NN>&t($126jX3XMJ!ObhY zSjvA)lC$;vmv9T}E9Zn^_yaToUacZ4ZdXIM@98%KEg%yWXCyKHe&DoEiL7~cLlBjaUJnR$u zRO8#@@w|lnEfZa?V76fBF3|PGej+xr<^VlDe!MT)-qrJ_LUOGf;S|}K28w@eOU#tC zL+yjUs&Gyx`D!1*0Y;5K;LJP~Wpk75v^aWJ8s~5>s$dq&E#V@oE@wn>zd%JyOQ_SP8( zoT&?(WTn8s2)G&)?3yxrlt)M`A58^kn(!bD`N+$*ZwxU`TnLA)qjw*sWto>R@d@?A zkQN48?<9Y@tZp%BL6K>L0L3=w4nyiGx%=RXPlWP+()AuY(}H1Q?7f%1K@K5*86GcC zoUmWi1;95q-Ly%==mI8Sv2qC__n4)(!ozq%YB|c@D^Dmo!6^HkPE@yrLT!wea5bY}Je$%M zNNj)ZM+wPjb93lV*)+M2E1E=qj|Su$o>RC=a5zNeWu?(6j{#JM`+fe_Z~X6b0E4$3 zOeUkwD)P;nz~+Tiw7{2mN^Enkuq)Z4nDbfzyG9%p1iB&$thzx)u*(2s>6rmZ& zQdIerJNKp5`sYOzH#gu-ssqgi0{KlT1<+Nxwe{I3nf{5Imp6y)J!6{Wkz}nlW%N_n;Z`38(VBL zK~v~Yv0;Y)tQ-Ft4^d<2QrS+0R33ka{|&?b6X;_`(vfs6m2H9ko9{oA1OIL9a8l&K z)8?O0jbXUK_>-=oeE$)4Ij52MXK&iFe{1`jk3o{ROLhB$HyK(zw;LKiw40SU6E%j`KYP@5|yv6DhOg#v}o0g#ec=@ zzT0RN*HP5AP}%Esy2@bP8xY)>xfa2q2w2yFWLR^#VIekuq%^Xu$$r%S8@OT*G+Y@8 zvs#bzrr=ds2qnUwCv3f5^kq!N7NeW3ym{Ksy<~&qC-&i5P8Vhwm>{XsbkdTp*kYmv z=Stgs5rh9xy5cUQ1y6<|vOQQs`_eRvcQFpATFyKesTgX3(PdH4yW1De$8Y0G&p9ns zgNZ#<7cdWhu6OB~kGUtTQOBZf#e@(X0GuB75M{~bsrN`Uhk06)_mqZ(h-&CH^EVFM zNqPH%Y%!M(C)iWX;LJhH-dVBWXgzKa(YG;~WzO!dD4X6!K^({-@Lc>eJ49jWMcu;j zdn2h}q&?tL>Rpd;275UPV6GV>GKH4%r!H`hJZ`0bcOMXYV{5yqA|_u7Vyzl)|zvKPg#ZwyXsd+~B`R*xjHA~>s-?EClW=bQ)v20lC{qX3P93bg1nEb_-vFGX1YFa;r7 zz;6AXmCsJw(oQWET6Tq8{%)uQx<;UXRWt<-R6`JZ_lJF+vR|z48x|Buw2mJRm!_EOw8!JT?a?p`~h3=iN zH#aCHa-=U|7VUDF!O0E_ACACEI@X65GAvd|a0y@CrZN8V%jk6Ze4d-#JkTWu>#sx) z?EXfj;4;I&eAeCw5Pe$!VTTXxsIolg8gY!nm#C#seJrw_l%A;MESWntbdFu+-^;RW z#6_p42iguS=mT8Q!u5~mYYTyYofC1U%%m3r;wLV?ae0tma^WNHyQNy}*AQfgilPTo z3}(*yrSzrU6IU#CnEyF1V4?kwc<~*})JEb_e4^$L!D%cC1$MR+)4!}jzSW4p=g1Un z9o1Vc0%3$du?xVkvt);i%?Qez*~%gMk9q>55I9M34~$Dlz9U`PcxpR;c9^LWDX`X5 zvTgwXBQWei*zB8Vb+ei!OvF25xie}DJfX%t-ouPWBChbf@a>H!oYloSg};{{pR78$ z3W70}vq%}d5x%wXu7YAoAXj6E>6O-2koYYsF$UvTUQQM$?I?W|ls#rf-ohEZ447>& zra%$pO*D*M4EU@8(;z>8%{i4sqW~pR)?%*;T7L6T?YYK`hP`ko1GFG7g==%5Y3Caq zVASbLw}1!U(esb`#YH!N);53FMcT`!T`vKX=00vHP-Zwt_W-N;@&2l3fa3uve$h^s zz?kGkt-%?WbhCYXtuQKmz*yDNU*1V0xs^~*aMoX-E3{Bq)iTn5c2GY7#!T7-bqV{h z!7^nEHq5bT;7#{?C4oA8S*Z1VVi_vH=!O+F$hrrhZsbEeo!C<>( z--UX@GP`R}#33@Y0iY(Xhu?;BAj|{^kh0K?5GPjWOjxt2`xse$lJ?YISrxHKu 
z(~Gt-{Dp{1j{XI&yihVMGrSbQSv~DUZHqcE06Ys+R5&doJdd^$aVJ@l8-`oj_G0gq1N1l)Z+|%eAW>m!^IG z3j}y7fjq3?8I9FY;gg{R6U(XA$g$cm)3z|EaA2)MV%Sx84y8ztio~3g{N{ohUu#l3%5rl=y&;r#Sbi{mrT;i06P48)}K((QpSW|->_Pwes z&14|+A2R&Gu#HpuWLrHLvUIT!GT~)6wir6s@j6YEc8t?|!!gGQ0?nL-@5DW$31{!9 zBeDswxpx#Xr@O*Qm;wy)OvBpWeQPUHiY^al4#8@t)-3a|q7r&xEUme>lTAoV_%Q2u z0?&Sb@4kS6z}Ri(;1&ECTxArHMDVsT^;nFinLhdIIAA|D3ELHo#m+3OUBnXe!9i`n z!S2n!)x8RiSkylWT}lQPq5Z-7Aq^pM6^>OqZjdms|zxzQ^M zZS`3;`6gEBVfu=Y`r+WYU)XF=G=hf-J-?@aorzp^n=H9Y(1HyOo02wU*9_I~?>Fu# z=BQQYvy?w`gb{ZBr4X@4FG$I| zSNlJaIYRixUvzA5B5TQ?tB^pS1*?Q)nd@agW|v7v>B!o;qD@B*0(4nc`%xO>_EnL8 zBF)C``!8RHT9bW5Jf*r}GNjkYGj8Jc(f{~~wEWdbIoKnxgrLOk)ep!gP7nu$IB^gS zcBp0zUzw~{gQ>5gb%w^Fq%F^P|2{hwKef||A9+vD>a<%=9LVdmsGB@FxcM=&m+x}= z7V~J@FkliL0?-9_s&@1|IS%7`-^+S`dwF(~sMx*274~~hdIeg-oeMc`44vGR|CBR# zqQ54{E!)kZn*+7HA3LMyh7FP+gqULfXc?q&dpB-aVWD+VtZ0{qKrnw9RFDE!jkK_q z46O25;ae7a?LB~;1midk=a7db<2VBDj?;1kkdg(~z0koX@rNhbLxHA_TKyh>l4Kk& zC66`q?_$~e)xxYtx3-UN1H#r%!dZs4P}tPE@I0FS+9z@Pkf`-Sv{&OE>EaFFvGe^2 zr`I8&`Ew!Fb?04D7Z>HBZ_<^Fr47s!n*)xW*kOXg(;%qgSohd(QgJ1EKSFx-`xjj4 zbYb<5Kz4?*@wP-LTfP?=W@z+(i#k;(+sVZW@&F!F{Y*7}3S@$qYjB4HbDLp~a>3Gw zrac{Os#J740#|rmK>ty6U(6j&xr%@FZ*uoemurdp%zZ@Sz^Cnwn|r9$r~DndsxxynD8R@t)u)X@C&bhB*&e|y;E00 zb2olYUw`ctqHbfqP~bA-#0Njqa091mI1mYu_SBOvs!PjtG3f_un=BfiC?9X)IpS*< zVmwywO|Rl&J#cb#&$|YW8Lvp66ahD~K(gTbFy#K|4lp)2k|!@Uf8Sf@4$I zpkcVaO08Cpzl%0UEke7mLy$tP4Baa}a|?6ZiME|Qi0T6Y#=6Z>fWPy)48d}0>(23V z-l*ZV#nQ)ku&j3!dn=u^tbX=nf!kP3$Fng49nK757wu^jGLB<^cA^p4xBkA}=hwFr^Q4 zi+bquVd<9@^jOD#;B*@SVE~MIvz$?q7*VJoQyL#>y+AXY6wx++huy(I5N_(*H3nPa z(>^X9K_y#UOi}Va2!b0SJM9=o#&ah)yVmJ7qNS{dg$v5*mC)i97h+AKU*K4_8T`xD zTe-IIqLhzvUveUlbla&P>z=TN92IJ`ru?}fMSZWs>hrUIHmZ|=we;2l^R-}RnTjE7 zPKt@LL4fE;{*oxm<+GHnk-r#|b#D`h&sGVHt{<7yCAyy&kfEBmg)hbPKy8g`uE)|@ zG%thq2G#|OUk6-Uu*f#kvb3uL8RbUweVuS!6%*soR!xqin~o!z2{g~$vJ4Kh!a1jykR$P}@bzf(HfS%>v7madMbc=lBXF+oDYVn@pLZv@oi$!aSLh=O2q;p zq2Io2#+}x*v=qfw6J9BUH6W1l6G82z<7cbS4NzTn85dmq!DZY|YJ>#)?5klzn~{fq z6x`ekcCNkW4qP$2Tc$i1BI1maZ+z$59^b9D9e-$SY*ctEhMeNYA6jZ5+*v1Sq}ls5 ztg-xV)M!v!BhMM5`-^6zsYwJi}a6OWAR91 z#|Ru);adE9rBYvL7L3QlB6*++kHQt#;=kZM2QRxY*fO3n67pfLPS`Ns zj_~vWT8nu4K;};`Oyb&r*H~}0RX*36b}hCQuOLFuEV>Ti~6idSL7Kx++q< zz5?hTcNF*UNGm^bAU)3g@yvdI)gwn{quz~`x9#0XYIu2SQL4EuBPU-;7gRfsajriP zk-xK0V+2(fuoc{3%8-bOZn4WX0>%^sgc%5mKg;K!09M+7)_)LU(=hlhBoT9INr*50 z!*k=Dl>!#AHV{q>%-we)O=!6*<-(0< z!iZMIr->y(t0Kvv&nf}0Pjgp7r7sC9RNtrH)m>Y*oMTpRi4}27D zman8-#zj^pF2C8hVLrQml1CON#3Lj!+<{6~UiT_o@k|ucWRAlBdNUM;m-Y1hW=pA1XsbQd{LrTu3O&Yo32CKQb&b$AD^ak`Bs*3v!z7t0~HeBt3}a8r_H z=rJmt0D|6YB7Df@5_x3^6Q&L}B}o0^reg6|@=GyHEyyUzaNx|X2pR3jUZ!e7=NI+% zm=NA3w}85TvmjKp3*~R`4ZK~^0{-*nt8XSGqtMYrs4b3!|t_S!U}Xv)z-1AS{v7H(2=Q{`KA(;`D8ISUNlD7AFQSf z`cL;CfbBls_AYV7!d*hVA%K%#MYY&YK3QQ0o`hI`YJeHR2ea?l0Cw@^AT0tmcObt_ zYJh1_V5ZwEV0dmqV>0D!8_M(Q9rt9U1-Fcsmre%pnWLs84^(;unQy=6ni3OD#*bqiD4mw|zdigXKU_Y)4Y1xkojoKr*~#z^}920RXydT z84kxiMe{jgCLyj)AX&Cd zxhv}x0^Eg3^9*>(97HJ$xD43PrD{hR-xZE?q1ljUoeNmKA>jV&!}IU&^y6uxMkRCF zLOS-C8}#jB-J1-+Va8;CdBW*%USr78@og-*4Snc{z5#L1KP;D)$=kcgu2m9XM9Kp^ z0t__dIx@xR`Dkt&G zCxi!!W7`9Z3}UOyvoh^D&Z2J(<(Vh2=5OXl-b_Lzb5GWqWgno~8}zY%bJsuL7R`48I&~OW52$ z(lR2!$imk4r7(g6x<(*&EH4EpW4v!f1XI3+0Mglfyjzd-4~-#QQI%S!YJv@S@XXaW z)xho~bBAXSqE6o*v*@Q@Brd$=-}=~icXuAmI9|igv><$ce~y7UGDlzXJv)r5$76Aw z{?h2rSoMM7i+Io>1#6ZDyFmISU0{BOSD!90bIVQoh`}G4Gl-Z9xS4{`;l`}?s zQ#%iQQw(0@U2zmEFHSJy(4O+BXYfMe^i^DZv|krmn(R7{z_8_GTOCUGMo3_LqZH^^ zSH;7ej$EOC?}PLkacK;X=QW^Undr;fDyjFN7*$&MxtYe#NLm!wrVRr~#@kAu(m+NX zs*pr&NhGA4*59VKp~@yXB7<=DoFQP$h-J&&uv|~_vhD}$Fkd;=;0tFa{fJs9>hh5z zHwX@=4z|O(hA$oy%mF>^`uSp^3xy;G3+p6KMaRT{8Zm*P&#{*!pjwlJ_yTk)UMr%S 
z;2<~e$1>!1t^^U+*%$vT%6U6(?y;F-t~7{O!di981oT)vzB%VlHxr$b0Rkh>hO`fk zf;(V~-pT8j4N47;c(Q=9`N>Bt|d)=u15GtCIQtUSt8*kqF{jd^n+gM zHF4{-C$}%lGl#Yfnq~raM%+U^m6WUrrw=24K;-dE;Wb#-?FrZ{%0gw#`~D$=OTykH zQZdSC%@kmi8A(N7wkuq4tp{$noXfxdNC(#Pa@r5twk0G6GfB4Fnbg?<>lhcIKsvm- z>Sd1{Rm>uyJP{8B*rkaotN_-2K7}c9U5JV!rPWfM3ro!0u2C$uxokq*eU$O1gp1iEOcZbz?gm$Nns2z%u_ zm7@XWDAqWIV{>rY9u~lq#rHdtH59sbR+`WBc(NXRY>fHHm`21Ptqn9t&V#Vjys=3y zV$=+%)#HB>jLgOy=}SI&j+vQ+%c94Bf*poZtPZl=C2^qWjw6xjKLI~G1nE|QJd5pD zdr4(qI+=l(Mf#}BV~&K03k85bZA0l8>m&vz;5)?HgshzzeX!FdR4``8vBKA>#Gowy zVkHi!W1S6k%7~>e(BR?@sH}o-ek+Oa8TykM4ZRky*`vI)TMez5<9RKJEB<_ckU`}* zk_YWn_xiR5_1Z5y0JQ6YEgfET7-blD$r3|tTDz1azSvmX!3iKc3I0=Qpy)5xMLYit z&#Vo4mJH4nf@uZx8WFQeKlcyn(>ZQ46_cCq$jqW#x8OG#lN*t3_C|vpG|1^I*0~8T zBs*zhO?&2g`~2~oE)GO-tDGQz3~HPpB^qnaAr7M2iNJ3g!kIV&&Tz{lzNsmje`chv z(~x9n-*R_@uz+S!Ki##jg612MxCaQs4@!SwZ4DSrGQ+SI3?{Y|H@wtwLdd zFD2FQN(C{_7EdU%Y5#4osbDfxoRpSBzjWqvdm4#Ncz+Y5@%@JNSqhqegk%iIxNX0z z5*d^IdFsg5yB)VBI#u98f$K8NmX_B=cG;16#@>Sj6^?m1O$F3Gu8|^OQYL^`{q-g! zt@37OhH`0wK%~9utPh`zP%AkZ>o?a?{~cl_br0 zB924Liv8^o@p0Q6tvBBQhg+jrnI$k*OENg}ERZ9nN zh!+JU_I%$1E-sdw*9_rfc1h;LCGVOLb$)Rl=tI*4gN{Z4ecAKwfI-Fmimq#QgQ+-{ix1G>A9!JW*pbH!4y1um0?4Lx zT`LP|bZ$O3cT1`gkE=K@<~y!8$)Gw-f=ln$r<$@o^x0-_!$^>8)QybYCD@SMn@@BZ z#LbsxxTXSs8XCL`iUHus`QLgswEgh; z!AjTX@g3ZsbNlPyI5$wo?;*IkE@G7s0f2U9VSfyBS?|q1T78hx;ZJ-k5Ti7O2;4o> zb*ZqKF|5Lp%W3%r*H|C*SB)Z;*{Wq^P`Ov<&S+YEPe5>gm3;b?oQF6@utidPP!hV^ zV>jO2SNyFQ&~8N~dVd6zfQ~3cEgv&svMu-cm-ic{dD~3$9V)7El35}hnFXrH1eS#L33^%)(Tgdp=Bk~F- zd}barUU4^0$?QzP5;QkSct#d=GM#eLF>kTzXDvQzoWtmfP<)BBrcrIDn0G=3k`uJ4DJb8KHdn$ z`zAWFxwAAX_^fwatF1rAPzwkROYH92vkn}8%vw!GVb7#V_(7{>$Z08ogj=d~XiF=k z?k^Pb_lqUVQ;3b1FFz~*-x3TZ{1WC>IIb3Me!_cnYciNXf8J^>%Bc}VC*>r?6=2(d zWUhf)?j6HS&4Pd(KLHS%+;r_K#9K`o{O+8mDNr~a2n59UVL&ds863;$Ma2uJ#V(3} zKv{In$)s^wmU&TN7;%D}qvtsxS{lSj`V&+!jAck|I9TBvYbWV3qe!u*zx~ zLG5pL1)VOmK8d!8xDN6X|3yk7GHQf>LWLr5$rV2hRevHHED`?Gs@wymQM1TaS7g4g zurA<(doW51+%yg-8+21Kx@%`f+Q9un^(T3DXO!>!ET~WAAsmZtRIu+I3PvTpDz?n! zx6^U)cH_)m;fl`_9qK$m|$!(g=4L4pufUcstRb&Ypd7#+Cenj3m+ zXo4JCj|!k(Z#Ip3ly^n>?(z~R+=5h1b2ZqNmZX2Rw-o(y5@z@cSn*d=gS*9(N^syO zKE>)4R(eH`gT|=!sH*ZT+2$F4**!Tg3qmXtZr2}5gX5%uJ%P5gu+c+SMYv>`)h9G` zlJrMS?Rcc~frZ9Equ?oUu@aHA(cSXHP})TIcrwM8KsyxbWf9YEikdHZa^++W-N_}i zF_k8hpT_+-6$j!}Ax&M9`?**kSzr~MWdQVd^iQgvKF--|0;$ook4AHUtO7hR)E3#B zSM0)Av!ov*_g9e8ujB=!`ZR(@#P;ea@T1F68|$Kg<=MD)ef_YS$6S+lS>-4Wrn+P* z`cCx%(@+Mut6q6ct73+<8~>@l7=xDvOP(4auY>#sxtv3Q-j`@LHZAq2w@k}qG(_|J zMNdR2w#b{u&bkNqG;LykD;5bp=URM1tn5D+z$0on`#HC(*VK9kM)lmuLLwLJ)gGmr zUhC`w+w5i&LAt=pM!5Wg&lsRcbfVC%6LDjeNzG!4s)OHhqWtlM*}d{6GiS1X*s~;0 zn2!Wt8wVxA*!1SBil%QBudXpgw-h8MONCfry5CSn@?zyC?M!)p0bopm>lf7UHh(XM z<=9Sdif7Rd)sSj%?_G++`)soOe1gg^4ets?tLYPX2b5PuQK@%FYvw9sUs>n3kOn~R zyHW75sI%>|rRl*JITcBnz0cmDa~px2(GJYj%O- zFG>4E-A^cmh$%eXLnURmSDLYM%i*2-bFKtC$6{yR1Q5x8()@)@z`j#>)oD-}_4Amy z`SThQr3F8ieV)0A{kLHIC{W_B+*1d5PZsmUW6O@m;^OEU;~i7ouRB6rf70rOlNH^b zcm{sweo>yhz3?W*zl&yn0u>tG9vF=jSKCsj_ptCkYIlsJ4@I?WAz%7d*F4K%s2GvTNBNG3uw8l0Y&H84FjGGdh^?V+h~O`ZQEU3lKYarmgE~rx0_y* zR+*%DVGm^=Hz+>^_J3L9;3sm7CCMK!PP0n=RDMUHBOb%fuSK2`LEvPh$~Psz^ryW! 
zh!WcrfCqEFnG6vZm@)xC5%obXW_N-v7bOvJBQA)8=Z-j0iJ#>{v-lo-O|RMX0=P6p zB6t;l@Se&|!R6n-n*@<~XRtZyYO!PxC$TLOnELA~%rRYbYd0*Nj+6L~1wcHps+cE< z;ds*Iw=(B^m{IK6Q>YDGXnIu60KIE5)`-$2tjFv~4b-CkLMZFBqFu#72+^(IHh@43 zh8PL*Z>EEQ^B+8+yfc6K)Bl7B|Dk*m!UyEW>bfS2k^YQxxqmHUl*OSE>^ve<0hqI&)(G zi8B6I5S_)4;dRu|6CIZmN#N_qlRWumL`$p^POxN_S|Pf?0dWX~Ixe3FO>R&@tO0{8 zOf@^cT?IKDQVQmfwjiMn(8-sB%s`%|nxQp+M`}|w zxuJ1AFH3=6Dq~M{B>AUqi9#6@Yz2|sul6yF2luOiAJ^I91a`-vryYW0E)wi#VI!fX z#pY`&SVlgQAxZPp`J()j2cz~psd#UH5_l_8=JgDU$qWr(;C$NCoNKgcjj2i#__2m% z&BCf{-zqvJ=uH`oN8bZJJ>EipU2{=d6?nxN7)BUOfkZ;QdI?6L)rQf2s~PWACLn*p zZyVb|n&!#lXOApJ^%6{BXJ{S|ENgabuUdU&SCC$(Pay4hE|q*hOora4z}{%pJSDHq zOz^`k6z#JFVTVLw2^t4XCD}wA1T7FR(@PP$RbwLd2Qi0~A?v1VthR!^Y;}y1=JdG4lbNlHV=GzGg<1dUY>26q2-so@H zXqIbuE@O}`L!2)oOD^LpADd%B95XTqjT^Y8-!TAWWDYwftba3E+Uyp^^ii{_3gVAY z0F;WMu_f%!*BeZc0&d?O{(Sg&=w$_JcK!ach+mQEUheke&^Ql&4rdQy(lRiNRFFaD zV#xYLFuLVFgO%$x#-r;^jrvoFFOs{%E;*`#%mal1syh}g{?%8peiTbfA-|O1F>^TQ zRp69|KF1q7gWo30%?>YKCnYix4Roe_m~fwg*=c{!TkyMP3l>OD+9BKEAznxa#-n)cW(x$2)wETv zD^v82}K6jhh?Xf*fkL#7yIZumEH?_juZ9H?bq-R~M z2_0-8x1F9>oe!S_`(0f5Nq??T*%9Z^9~`z*SWTzTw?iQG-@hC1Ftd#yj1XHKgKjhk z&Zm!s6ZCkB)5$Qu9~RR*PA_Kv`hKD8JlUO#PH4zz=D#l{?9H*B#Zq-f{Y`q2Vzsm0 zCkIckRp~pcdB^IGKToQ|KGRG3=@rw#5j$$nPNQT{AM?+8Vs$<*oPPv!dh*}={`_bj zcJnCT*MsuxfJ&nx_oq2FYzORQndgw2O6&RLZ! zCHM7kjW0`I+V#=hJ%2f!EKZ;Ib(EJ0eNNvWDv$fKcQWkECGzfWu;G_$*tKxiD7}14 z$M2Kq%?hfwnN8kzL42`#d+#i(@MsL{ojd67+-0_#Ki-mT{XW}1hiB`snJ1s-t~VL7 z?(G)m%bCO9XYRrr344r37kd~@FPG=YonHrLS|`)t`~2w}*?;(XzDA2|UL=-hn(UGF zAHz?1IXxNf4#`l^>H9f6ldHLALoqj3JL)CbBYI!;-<@QJtH8Y3qu9e6h>W+>w6U9ZCIe6PDvZrPjo zXxf{lv-GeZ?0@d!Jn+wP&73!49KH9mogbW%=EGpUip3-^vT0oAfgyveyI`}^Zu?Hf zy(r(zx9RDlQ;YF4Uy#xZ50Iyu^Yl`))s9{6i(MAt^Ee-@-rO*$R*T(4EJ7alvS+^w z`KQl~M-XkNsqcQE=rZnq<{#s2nXPu-bggwVa$6 z$MmpL$Bd2A%X9kH3#~m1iBb0DXo%bebFK6AzI(i%0u@cRmlfIb+r*poudX%s=(`i= zv-M^AWPjy+JH3_HT$Fv%TXb{x!{6S6<@_TbSh>4QsoC2-*IO0tM>ZqNG2HfpzSEV) zcGU~rjx$+os^v5v9PEjGp}nX}ET`*yfhb`Pe09EoYVB!5`aLljYLnGv(x*(EBTp@e+RX-+Oz zC&RAkd21tx>f)N^olO)UFUR~t*iP?qF5eAdZPzQu*k;+nl-6>A=Wztb)#aYfKRt{* z7_Tl0HlMiDzlN(td^=|Q^}IY@QsZ50ZpP(cyc^H=?R$i0$+B029N&)CZDAjdn zEq}WA-tRK6^G9-~&f|Hl{X01?PyRi)%!g`p$ZKP|8>Y_N{(U*ghUoI103YAIMW4^l zJ3E-oKHlv!8Iz~~W<1H|U7}G}$ZC5#y-kjfa8h@n_uiGmad!-F!w!VI2bi;cHpQ1^ zf3+$H{j?vW1weeR!gph}pS*WJ_;$xe{`k2x#D7)7 zb9dN`V_Zg+J6VtBxBDAi?VoP=UXRUze;ExY=g^2fb}-kWVt0X4mEDEL%LxC znO`lh@u>eH;?mk|4MX)-Br?*SEsZZCu6d&(rGI}C& z5m)0`_0hw#`cv#z{oe0CK*^nb(SEFZoz(jA!A@_C)6(Pu|Irf;J@^%kR9;SNsME1t~` zYxeoEUaw}3?;YpkY3ZKQ;qsg>mt(ZE*7sFE`dp~^V&b*sX3=oCxZE!3gPKl%ew}x?JGJ0_Wwk<-&rPs>FFo816(>GWwSTczH?p?2llfy$2VO-v z@5V+UdJdlU9LM&YCmp!K!gm8VbORiEp?unFDF8n}z`t)=c3UUi!!wucjlUH{hDB-n zRN{XBc|Eqg1_ju6y8xsA^XJ^Ge~it#-M*)0y;zlLLyjmU!ybR|e6!Ct3V`N?{$|`v z!9vZ{D*)tV?G&uT_AGxSsB+cUldtCbsL^_LM(53f-MdTu`m3HwH6>qDuNvMA?aSuF z^t`c;r|E1~jP=y2ss8yeSzGgTW%0AP_K$p$)cdTZrRWrp30GW03dUTH$y<>lD+&ZZOSZwqcgK1)o zpIgr$oreN2-x~AX4D=K$)J&}cKymX+Crl6SYJOD>Y!1KyDF9y4Ck>J(4S0P#iJD6` z@9xSPHWWajd&^+CeLxf(0qo`*A(DW3R)%JpV5DM;W%1d`l9LyTfp_4P04jj2=lF_)VzpcGK7Ynf zUd2mOw9m%Ap8F{dPMzrI@AD^Lm{nwJ#)Z}F>>MJ)dnSJdtCO=MIfckXpoIPy+h{}} zWg$@`>qGqa_ux9RKnNnA$q)pyKyp0E3=CpTk)#xcd@u!ykz@q2;SD6Un)Q%DcP721 zUe6lPQ?DnniVQ%nC&}7pi~FzVd##0c;SlM4?|CE{@MqHZ^t#h{-kga{Y?JV(_l)&= zYcq`^eU5)fB@ut}W&3$@Kq|J)@Xyat#{YUx-s}^E(Sd1(Diu%fxld^-GmMyt=y=Bb z(pvOXWG<1?A9u3o2&!1qG|y1QDTXai9m7J_LT=qH$)ZJuj^akaQ5{&&}Sy59HeRK zd&T!%20MvAB(lI{FjwH-dfPCfQW4m2p#5By0((VT_p3;lUbpK;H_`*YA6J?$*t5yeeb zk4&Ga07T9(aEsuf0vTh-U}{&Xc}ij3?6qR{!r%gYhCoRZA6AqA#UK)o`h1GOTsm2d z7>u9ciT7+}xHyZ?7^vfj(nKND7>Xkn6rF!WVNtXqtBJ7%mG3anz=T3fprpu3Fp~L$ 
z0z>C;=+OXnRDE*wuaUQ)Ja+_t$#(p$McoZ~qfYOmg5c4fj(C&_zSeeOLP%YId{R})S zsNi;IT*$;kVv7kx*x_gQjsVpumn=0XHOD&&V#hKv&ePn-DyMJ*o1i2h%CNd2H!(jA zDbVd?IgTe)ENnqJ$J7^;`YeQiYcGGP;L+_A1jK{`jbQ=k2yh+5^V`g?bATuNgpjrN zrhNrU7G|Y=NYdv|?c3w!Yd@aaF8PxUyfm4A`^zjjYhTgCg_oVrBpbB;DAqrlp)`t` zeLXb3spsKX`^=N0;<W{O49Kg&3HZ$$J0c_7z0&~8X`hajun4JcFOnD zOic*Hlj&2$l%_WF(Q+A$ODf_XsfA4;rzjmC0w0(i$N))dPpUXH#mKS@INE6>FysfW z6nO8X?3x=94(M2 zERa1B&gV52n#4%@UIhqLoHBoZByBuo&>c?2Q!S>|@PP!If&v64pt~g5D}Gs}QUpTw zNuTjk`wCO<^Oqlf`F9WVv(uvlfr*6R)Tv_vsoH~L0ac?JlJ4WbV+@6`JF;Oh)c0xw zuznKB5YI}}grtq#2OZawuh{u@7v>Zt3s?;8K%5dLC1V%mfS@^3$z*?7#cVGgcLa%@ zPKQ-~u$7bv!h)ORyd<*)=~Sy?s^5RMA6YP0KkqN7e*dspZ{riS9ggrXd-5sPvCbz@ zivwP%41>(hkRNPO5|F)*oigwTF!p-ydkP5xDJaOuKLLG*FxybUZTS%#k!&1LS zO=u96Aearxz>kz~Z6r)s3Ph_oBZ8i6H1ZsvN8*nXM^cYvIof{$wQOZzTN=*)^0PKZ zhNqD%2pD&VC9$M)0n7k>@)c6FcAtsi)l}Q!%RfYO=D*@q%^Q1p;i#U*D6@bf2N<)f z;RGbjkzdOE!Z5Ck9hPp8XZE|5Vv&n@XUnyp!z=!-u#eK=4qYq(LIDUQx^FQIB@O^d z{X87n=lNf7-qwEyC6I_HK{z>qVu`IdrYWBhwVwPBcnoRhd6%>Tp8^&7ej3gF^q%4s zKVIvW?Ryeb(5-3gP(^)I^|o7x)^8cJK~hRku^}K3$fU`Jnvu9u2D#LE$NH(4#Itgs z?>tc~YcY1aw!Y#NfrmUq3CQ;D=TeXqH~~RTFieCbh3|j+)!-2b_3y=^`Mpb)HU6Uy z?YX_48*Frn15;59^919x2rNO(?zN<3R;;Or36j$gA&s9RLT6Ql5Nit7Znt}deSvEQ zD;LCXjA>$k#+jmx*^;u^QTA+6 zAcXlKi{*cSDS<3ZmkV1&tj;5>1O~yTCgLj#@kskJolDepG+tRNMhP`{k$+w1{u>IZ zSPzrh!BuVJ;nJ_s$utC;=QN@fkHkD?KH47HF>3ZP)aSp=@e|;n_t8>~pE#P3q>kRp zFdpz{WUw!MMw;06@}2r#P@h*Z>idt*9f5-?Zv%g+>X<3RrivEvlg+RY_5@-)N9Cuj zN}Hr{K*gaR$uclAs_zTsVu--ct?q5v>G{pQ)F~?ZkP{B1(D&d$CDjwh% zBSn7|M(N1<&gnLtF;x;Fr=m7eXb`3p#BD)+<>&@Qq~1mpU3!5b#`aU@$PyF;GpUSs zwrX=cltx2p!=5Eb>`IX~T&E5RMD8|pZ zXq*=rPBtPMi+hLWADX3kZi@JW$&y=Dfv|tkQ>#-iBevh!T2*99Is+k01Y>JK;bbRrRBcx@)fO1zFCr894&U@>{iH}i`ek99iNR%LRA#8jQ#0!U`sgfAP%0ZG^ z$Y=&_@0-kDx_ux3o&t6YdBtRbTf2)7tolBy_Iw*3%f9n*+TbgFwyb`wRiFD@-*l*RU=2c_<5HjFUE`gp#yi!>YVb}S zpQ%2(41RK-24C{INqGO;hJEql+gpG8)p+y=(Ka<1AE0tr;d$pF5@yUM)#mQ-WP!r4A zoKxsKm>Bx|jekm`9U{GRTpN>X!~-Ij36hsq5aw@-!U9F_o}e2mOEx4GQ!5zuuv}T( zml!E=w4S-E?GeLh19Bx-g3e1*p0U(;f+Fe<@4;W6vgkg|3P}#L z&O8`)0~Cd3md^uJZWw3C3+tYpL-Kten_Tda4A^%r z_`>XmxaY~}YwbGsTmIzYtLMyGGE?~v-|SHiqX5b1fHlSuNj}kksvKehLR|dXIIx3o zF;IjhCDc#e2qYc}_?!^Ec7>1OzSAY~Xn0JXwkSQMQj8V188&}*ekSc{3Vsl%JW%

;R8+5J?i{0 zDS&8y0$I;WsRMtq;WsT*bIrB3=TXH=_}}uz4kL&O>kBi23jPOd9HW_H6y>x>fnx-- zP7N_aFeTnoqJ}?G>^>0;A=$*QF_=SQNo%Qbv(@Y$_p~HZ?_ZNwW;C@W(>a3K+DX!9 z0d4PpZXGj4++zQNSRg*$OkpTf?0nU(TNR%6G6tzl>CdoYy9s_ zpJI|@)%f2++jH$AHEVI{3oo?$@)JjXuu;@v`S=G1J>&W(FV%LgeORLJKjNuKqhRD= zh$v*R3Fh8;bMSpC^_~pRE@+>c%md%K{TJ^&sm0Rs8HJ-H7=~xR=W&<3Ik=88EGb5r ztgWLL(C&Y`2U6ptRfJMbK~hHR@)M=JUH87*gUbjjfCo^n*FECGu=TnfKc4^OMsbhg zsP%0>`0vZ7>8~#}o?tceU;bb%(9zxbo0b`tnAsh!l=APG8-4V*0T8=uvjwdONo`!~m_ z*Tt%+ja&LmC4ch6ka$ng><6pn1O2lmOqYM>%n>nFBXY^FlLg4nGuQzYPaowsFZ2c| zD6UgqE#MFjk?}SGA(n6+1=@cjaf?IJn-IZeN|o~DkU~C0urUaFiccpCJGNvFt;NPW zu*m29F6UDx;zP4SfwcVDB^q!i7i5I45FcrFd1bu&q zhI5gZPfOAX0d9hd9FB*vyXfuX({LnegCHp|NHGCH9q`#OgnO?PW=Lh}W0Bai2q-9d z#cKOLvdcLWOk+fiIVtpGibS!eO8aqrPLo8RlL?SDe%w!3Q`dPWC#Rx+;C6&s8)Uk6 zP;;NZ@=7lhr)?!{lnM(~N#gkK*aUyNJFuZRpGPb(R(6zDL#mbKKYNd@?)&(>mNO8} z6olO|fm-HJ6qB>p=2!apzzN}#YyWALl@uD6J0WYh0)Ei3?#E`Ff5+qCzkQn zrdl8K!>3mfGR4xMQcjTO!*y*hW#(W!3^BwJD64Gq5N1(HO#-ZPq;pTr1H^MUt7g3#x7+?uV?+dkqC#oE8Lcdc0wmm?wkutig@eGQ()slI zX;+E1E9BIC{+&Dh&O`q9wPTnK&F%8nb1vt-#@~nHNtLur;OGodi)B-WkWhFA(?^p( z`FX6L8wwY{{JUieXGEorwfcYi#!lnfCXXV6?pU}XloYr=uX=n7riX*8_H7b=rnPQujYOI zeU8!M)-T_4w3>)Jh6qv2d;x-xTFCwMZG?$Q=qHT@r<_or)YSeuQOjzAHKQBrUrjV`exPo zK`*63V-`n>inn`#@WS`w+?ay?+YqYcjPjk^CVbJD*-+Wp7Jc?yVCI7rfKj*yko0% z!C!Thw_l9bxS+*(*t)Z$-xrb^;dv$lWOratM66Se@wL$x2K6r}ZY z2XwC7@PnG;wy<&NTqA|_&mgI%qu$@SnsmF5-*@uM9*5cEm`PefDlK%G9*GhTe|(gwOV>o%?{sw|b5Jg+qS6hc zi=VPH0*dC+Oii$sSvBhU^~WGN8N+h&HksA8BtC!mwvEcn4v#k!MBIZH9s(CNcu2uf z<00G6!!O;A&O?61*27O6)_#PZT*c-s_n^73RdXK4i;xxj`ZKEsuT7)o6=dnr4uwX38iDLg_#QNm+~zMVjGKGcXM*6V#`F)>%8nv z=H51z&&`z-76r}Q<_M>PIiM@9#;|Lf%Nc)jY$r+uR^AGPu!u?!D9R9wOj|HDwdbEY z|7`AC(cCu$L^V8;mk!Mrk2KIcEkCfoFk!NT>ZgYG?RTm_Y7)@s1+wH|hT1lo^R`eG ztyyZ_%&Rz)lUz_Kt|Bc1B(zVZ_bC)M^w4zK#`q`qIfJ;&Cu+q5y5*=IzWfgp>pFir zC-sOSX->AmNMb2$0WrMI?5Y%qsKe4;-k{z|3_?p*#(%^-k z8?Lrp*B`pMwsu$vOV_$l#t$#9u)ULC9Pm#aM{{ajIJ~$BN!1#t=gMNtGqs(I5>3OZ zJ3X=|)z2(+ZB*APhM6DEi<$Hd6m5UkdpjyvZ|hO@^)nH}hibz5s|6L}LS;0?Y7|V5 zBa@tBV_J!oj=$fSE)%=p$>-oJY@RP{GvF2pl*aOcr6wTY&eZ3Q_P=i9e+4d~#n%?R z>iij%fA9`jgI5kMcKN#F+;8e?Kb&#Td#*G;RzQ%vVvWq8F6SW{CciK?veADVH-nOx zqSSsFX+1<6KclTJqZNorXY}}}e$JB$@Il+Yg;l(>zwpegdxa}#d%o)Dcm5@Qv|6nf z=lc|czrHtb^34|qx77K~LDUU~mlR=Ac9N9o+E;^RxRNFkO}>(LAMqKUnj&y%}6SduFJ4B_>^#08;W_c7D26XNNsc)9Sw#A!ku>Gp97cK0539@S9eHw(!jQ<8dI2^7rg z4z7gR*rJN@R_INl2>E}4-De*QCW~;Us5~m8eXSU=>hWQ4SUq7*yTsS8ZT3$qs`0Ja zzew-jjXw>zJbdwP$|##(-bg8k>lH#jM6ZO{6$Gs?o8u{8!y*3CoQ& z#2`l$iJ@7hckb(Vq9Y%Eb@ZV!bak5 zks44WtXZzO1#)DApcH*L`3yz`3yQJ+ePZorYSJl%hL>srB4S%V&a8@ZkZeW{#yZjU zE+{9Bj;bO2CRE!!*ZEg1zU|)p;W?MyA?mjpgT@}dwSGh~t!J};w9V0%uRg! 
z>orpqTVxY_?9WDoq^|8G4qDzuk57YwN<wcCH8;yq zB_nR7B}ad)&VXEH95nlj4qdGc)KeI2_|f;!#+Uz{4}8aNt)JI>8Cx=_UwoCu{HvwN zeJbS1u0|Hy!i)AFimLC2gA8*S5bPsR|f#kaPpTz_kur0(;GP#^4#)-lW) z9fQ$xQr`0JxUD5H^;Ihr9r#|~kF8E5>v^Ji?=RiZfUBfG6RqCkOYaO3-8enb#frP2 z>z03tnUIgON6JyFaZoE0C6;AIpR+lPCe*tS?Y|bI5HewrFhX81JkMl_v8jJYgc}Yw zSX+3tTfFizW+LV@Tx~>9evmY_3M95>O)(Bjh7S;c=6T7cJC`2s|=>&bs4M8W9Ra)|8as;H?C;IJ27XCKI}ldjSJm*LqZg+IY&$3{cRHxH{^(-&9G~H2t?kdz{Cbk3zH0gPNo#+DUwY`J zS-t$qi%TSuf9ZB#*r%;6yfnebr(#QFGUm zBxT?`lg0g63=u;n;wF;u)sb4;X>6z(E!{xwW;vT;to7c^g`T8HNZl9FKL5Vd_a_5F zb>4Ai6>sX-j)?!#izHuM$X}dXqgOohL(==!P-%@miqVSntVJ*~qO2VIVeU3q)0Fg^ z@4&_#p-V8q@y^Agn5f8NwS0Qd+vtCMkck7^;Z`)5?Jq7d*|>PqGb<^~YDKM_cJzAe zN4F0YyRk>(LnV4}aL`+`sGa{qGuw_VrnG`dW(_@?Ec@pV&>-D89Ou z&l0!i*hoft|84M*Se*L=B5pd_aBHE$E${oBD7t)PvF=~Vg&0Pj5CmjFBE^4ARL|dT zA-mBk9--EsY_(6_Yy3$XeWt>VKjGScJ=-(THpLb%yMOuk`26cRfBXLVpFKs$J{dNpJqJ>PFHx*@^z-O~W>_Juqo+@y zW3jfiFU;}M_ar|WiWl$ls>gk6U%q>f4oUleot(@=qit&IS-v19ieG2l)LxsO5xqZ# zDf@(ok49@CV~hoN&F3%?#NYde2s?7MS;=$~f?;h;9`L14f{gEwH1vPSveP_GR+Bnj zxV0L%rren_=2O@%BN@dI)E5Tv8no6NQwu3Zu*^@w%s*L`aD;QO$!G9L(R7Q1O0bvtnNyz)%Qtg!K?Q5{I;ydsp0=N^?mr_@DsSA=>(?h?iE78%X=el$n#-XzNg! z@!7_8%L<=K7q9y3Z+;H4%~?eO3B?J;X6IL}ru)s^7rBmux}OdVa0h@2nCO1GAm5Q! zKbi5U(I0<5o+Hy2Y+CLI?fGdvGqZeOr1B9tX$Ce@p8nn1{<_-!MveU?+Wu1b z@?XDf$$Tci*3JJ|_YtARi5o$Pq4X}{}XM86k*z%~NE;UTE?i-%bLsh$7U z)M(rJ(S`r^{l8)a`ctp0an$$ORN?Ke|0vwFpE6(9ew$iDgPSz3;J?#J5v%+MeWBO%&MaZDfB^X&BC*>(qWvcDoTq(q3!o&(`?g zFMK2#zh|;G#(ilk&mJCmLtH<>oqYlUt^(08ZhW1}4=1ca{res(W1nFY}Wr7Gusv27fe=kk{wU)JrS9Bba$jN zv;Ae+t0*rU61wO9l}CS-Yg3FCmMSO_Z;>|`El`j%B>Xc-ontpsL!)jt zMqA-{FZ^D;@R#ms>$knj$)BN{|W5swpy5K z3R3eXLoTFAV05x&K{4bGDDHq!t@smc1m@H9^Cdk9;~QrkSdD~QYr9)B*iwH`#1aXD zEZQR(VWj``3yJp9rh0vb<|VpdyFi+LHi2~COigvKRdX+1`sKktIQ|yPbm8zP$Nv>4 zVI|k5pv*h*t}iwBrDNvs_Tu^}#>E^xh0xp@N?CiKzVk;FSYqVNfL$k5a66~pT7=`L z$~nm4J1%Mm>u%AOOX_Uf`fz^}M=TxBw>he1TW4WXeDyQ__yPUzm$ricGZhb%D8ykI(I~?Ss>ec2JJ=^M5q`YS8HWkiJ8I*8+cD=k(vTfHVo) zeuHM+dJn!hZ+x#fT1)PlA|5xmOU1FR<6bq?Kw*ip{EE`Zg?4&|FuTF zHme~vU-KYfBYou1!ZkG?Z@46g9~knP6DSNacZo<&7!p_K!$eUOMMD(iB?dVp9E~E1+m`98ys6 z3`sfxG3ql*5Y&`|DA+pBXzjScLsU-t4Cwb55M~HlxW(h7WadMSe7)xYRs9^`t?X^OpL&8U5G~2{)1ng7FrK z+|)fC1oNclC5(Rv&9gF_`e_vhi3LLSu61J{sEr>ZJ%9OK>-e2k=jOCL{x5#&4?Q89 z|I(%Z;;@oFi26YM#G-ylv@Ra0gd=I^r8G7sD$LLKp{e;lViafx%)+UWXdSseH_MM~ zczmouu+Ykc=BAnBM-HT3+UnQ}sHE+{Tmox!Yyz6+)INU@zVF)$qbJ#`zwp~1zIZ64 zWQ=A{HYMrkXv1)cfLg3FCZGgGhw^5jcxfXwHI+a}nlXmBA4<+_B(Au@?Sh%OM=<0I z6BkZ1zFMLU9673$&Zi@AHLeVjwVqG$-9Mh{T&(qL-?bc)^v$mxi9d04i}}vn^ld$O%6S1SoOj4_b**t$F zFyad&rQPVbd;uWw8>9vL)cQTdz;5R2l=END|J4umyNB8^58wRHf7#7= zM}F>@-Z$ODAJhx(PrYJc(n zmdAgtNB_Zn0Mb*sxgte#Z=3$_(O>oS*4O;5^S8R%CU5xG)O*j>{ol9?t#3v$vTT2J zz_Q^uPz_COAEP{qSUlVp%v^vkCdu0b$tK9_f4?OBw6Febn}8Go|~qkMKtyJ$m)gAAa}I!&cv8K^}jn z-cr{jb??(dxe)?SYY(;GA#PS!ZJiwn$Rz6h=seArPNDvV;d>r=-Ps@i*YpNm>znTw zH_w^Vv#IAU_z;G)3@K2DfZKPXWQkR|^n=8~BBJfQ3SnZ=@&f@l5ljosyRDo3Rn59W zTfTf;Tc1Dw%odu>Kf!E{Ii9s2Zr)3cdnR#eb+>s&e zYM8Kq<2@-5NqISwBmMsk*IW@fT3RsD`@V)upjv31bA!|AFMY`G9uNE5&o_Vf?0Y6s zg9{*G7KEdk$l>1cU$G|R+osPD;*W_h7iOgrk|r9jDsrW^0j3@MT}bJe zskmYxoBi2Hwt__J{Rsg#d}wZ^V72?BXBYg|Sj4(NP(PpX(jI)zRB2{B)T=wuvnC>+ zpe(t7WCsVhc~e6j7pYM3*%f~bvsw@&SWaw(5paY-Sh{7Xsnx5P@1f~CVMq_n5ZX7t z{BWbQWDDKbVK=y=_|c{Rfj9m`ht*R}&%*vor>#C@U;S_mpTGlhA+j(qsx!&n;su z_od7+isZW~%bA`HWJ(qgC@)^_v&eW011kEiD7o&(Gu3#5^4pzZP%yi#FQ~#+TWrzwE zz>yzbbB=!TmZs!={vUr_o{k$cX@0_zbn@UbLQQ+w{q)CNF;V z6%3}5zv@)1ecm5EB6g~V!~YXAgd)(s59wZlbeNfvuCScGVG)1(?lU456m&ADE;{%Y zR&0tysT@6^ZIcr$-Jb4CHN}3Mt8$9;Iz==`ZzVE&i8KBD9Gfzl>b_GXk||cs4OLMy zqD4Rdr*svdA$Sr+`dorTQZkLcpG`bGHCn>&SqV*UkTUe^EVRAJsQ=I2A9i|iX9G{R 
z7_G%>?D@yf1<`+3U+BP-&ePl?cvi7TLC;CjcR&#E6|&Ut{-0;Z60=25d~NEeJdU?ox^$5J` zJKa0|-TM;%^uBzU-7@~^xsfyif7bcff0Vy8@GpH-+gI1}a=e)f;sm-58`t~~{S!U5 zv2utt-}zN<`@4@z+lkZ8GqR8}F+|Y{S71TI^%1$GfQ#29f;}vsBm*uz;P^p zooz~a_kw?Q^O3cOSFakTFMOFKSj6Tb&M{@j6R145|Ma1H&E%=_8Ln<&(+8>VYL(+! zwLHN@H2rF!^P21P!E9LLHi>@&{hv+A_>&Y3elWHM`D@vmSl?p?} z3NtYBRjAHbjZ;hNS!~gY2K-CI+V)Xt-J8gVpo)JdD7w}hCP{hEd5Q_hvBM5rbCg#z zbpe<_Ez`*3UYQt!IiDw5#dj2@qn zG6li^=!Y`>rayG?+YVU6cm7jbpn2LKeYVyqBn{sQDZFM~wsQ>3G|@e^y0=H~vjOu0 z^I(6}+P0z;sYzf1d!H?HJ-b2ck@VhFywM;CjHUiQebe8^4OY`MlnC5_hh{cnupfFM zB;`csk)P4)?#3q&bX6jlFAWoj#?4v$ovS#SJn~N!MpZV;b^jhMTr}xeKuc61JPSt$ zM<@)57g06o^uTybG~(afOnD7O8>K`sX+~sp9mid%pIqYc^b+hzUjoA~33^QXQ~Nbj+Sk z5Vf0H7O-ma(ng_!nl-azm3km>YEl^~7IbPFMVDRkD}v#v;a;PERy3#)d{CWeJ0vUX^&*U>rAo7VNJ@+&5bT>pQ5R@Y){ z1+@=W%Gy{{!chN-w*~+MlpX~6zlpad_o zzp=v1x*O~&;QZpZA9G7JJadPXeGpgVF^Ezn%jO@;tO7mA2KN1>zk=tamBkBk9vIwzMsDVR6ia{Qi2rd6&K3P1 ziqBd9v-m3i|E+%iAHLNOz#_*_N;H4v0f-nlpRfWh;Clz$KobMQaYVqQyuE=B@D*tH zGY}Yd0zYtPGh@J`tO5J$hyieo7KngTs~@G$$en?rvVH{F926@2>7W_+8i6O6?}jH&BK$eJj#X} zYl9GecPmdg@2P%cyr4jau>|;-r67I_o6;DX=HU2AMoET zt2n?7VE({ieHAQkR>}_J$`=rx#z3y50I&{cH~<9?2S}AZ0JdUp0K_maJ97RbUp56c zjJ~ftfjD!i!(HL~u}XdeW&U~J&-;JT&%Pks|Kfk;uRwko*nZ*w;Vb9@a0gZh_>GSM zxWJQW+E-pM&Jur{!}hQps1M|ie824nyL`=SB1uEGwD&(v{ROu_QhYA4BzE{wd{j5`bPdb19*x^sLkR?dO_A_1(fB!fF z6vSWwNa6lm+hH>xGN7#33p|WC!pRNYfx+H((B|B}=_*(Tj&Q61Tr2*$StXzVKWx?$ zP+cW*AW>KXL4HmG!*MbKL=?o%Kd7&Q^;H}JEdSrt*8{48Ru(Jli698lA{QMYA4rAV z5QMNVG(vy-$n?032=zd8gdCCNygd=zMBMht>E?NcWuy}DnZiVEQW z$satcsptTNVJ$`;s64s?d@t%VNc^a8Hj`a9P3)_r;j}PpHqeCi>QsOOY#SeDBOO9L z12&lf%PXk&J?G=#ET?CH)pfYjBwl8q%*r9Oiidx1;Ml4}H+Yc^fYVuX`ftR6dleY6e>i(+)0nGjN?!S2$3qd@N|25(T-E0nS0KgRcYDskaK^DyVh{ zo8Utl6dHg)totZaBH+Ms$}$6Io`EsVY{*BPQ7ebEiJi&+j_b}BkTrc(%_8eWoU}P&mnk|R1F@=SzD^>K+ku1eZe)sa zPrEysgEtiEz?onP&1Mf;YxXtWg9RQEnrHaBn zk!ZlF8895&+JLCWGhp>^Sx(w`!N@!sl;3~y>{XAm$^nwK@q*c|AOLaSCy68`GZ3aI z?AjTyP9%j%%>s35N7hE+B@g6W`P`~9Z@7YkuKgJ4uVZsP=_Y8p<1^vpgG)eL_K zYA?pI2(ma-lZZ1DCG4tMVEsr3thUefEU?M%ZRg<|dmI}V7}E?@R4^;2!8eXOHqJ9S z<={5JnOvL!E`FvPh^!4n+{PZmGJ|ebe)zN;+#*@PHqb5tvDqAnO_B+)F%G@umrqUC z6_u}8-VKyd$rSi)@Jqku`EP8&&^dolxgNIxZ-#Q_XW;ZP$BIp3){5j*GxwtlfL-B9 zi+JWh26>A-*sg4xgwG8eK^q6|Z#0@TzCluHbDT0?Q1@ul*~i zcJeUhHX1q+CuGVYCR!sbGhpV~Q8+hayEt%`nS~KF#Y*A0jYm!Gt1~ctm|TC90mnhd zuO2EoaDM5Jt8?5YNa}M2Vyozt03=IOaA^|)69$!sr>9&6Ra!j*o-QmwqzghKxQVI* zGx1DP&w#HBiyH}x{~Js?LfVknlZ8{5V6-7`F&1!aFuWiD%Hl$Rsp2rgS#w_=NpmNUem204&+6E$N7eRkD&B{DU$G1%) zC@;~QmzmAy*%3nRz&D9Ic2EbCo`FQy1^x#<%(LVCz>i~R-7h@XbwT=pqx0H zJI8+E4_z13FPxiaDCghWe+EhAVQ#D(V4Z^=7MV9k4Ns@N#Tb ztb;h_SlBnQv0#1sUTAwWM~<*BHMnEbk9E_b|KbJczvQo7{u4b@aDh|~*q2w8dw^;N zS3ahpO+r7-I+4zTSnR)T6?0~7vGccmjOI@|=#Y{dXd7_T&G5#Zb%f9jt1WWp=8Jp4 z0Bg+jpT#%1tulYJAHL16uK&Uh!hgVrwE=JaCw%_E0h^+~K8^*uG053>SkW75Jp;JT z0Y(w>f6l)SfP)LrzYsb{-fa%wEN-i8DCd9LPjDL$OxRX@d{)cMchH9du?Z@@VdWV{ zp_`yjdTLFVx9(xqidZ+$r_DfUok<~qBZMLc+jW>{0HS}H{r0!Qw@(ay%FNCNoC)a8 zvGh$twh9NBriJgD|60Zat9zKTg2S;z+&YtB072V0)R+{Vg<}lZueyvNZQ)s}$e>irM4x^6#4LvpVMU1Wk09S9JZ-I8ldU2)y)}H4E z_2%|xoEZ$Q(|}45;?|`hZDwt8_+~c;)YVNqOm2S$Mi2XypSF@VAiA;M00xdV;qyOW zP_{u2`>fw^*(TapFI0c}rS5wnTnr=d{L-y|Z0E+R9H6q5frS0Oo&n_F(*5QY?)t0k zSvXdylIibcX#Amr0+^wXcfWd`ZX#;N0lRonSXH*zt+NkU5&Azt_$yw$IDf@c{#kvk z!d-u*uWTl8cd*)%FgA%ez(gLk8$bx4e|3&yvzqze<2n+i{sq_Nj?Fq&4f6A~mjTM= zKjLTI*v?pI9{PAz{3XL5`}?ikacSc@llx6iX0Y(uVN^B;R^@E|ct7k*E2np+Kh^O+ zWy8?#{6ky$+J~ZyTI_KfE0|TnSsNReb@G25L%MwIt}tJH$B%z&Kadxtcb~9-0gQ51 znc{)UZA|%FH|%%9=O6qX#}4>m(kEd9t^?(zz%hw+%=K3nbs)hpp{4)8A?^AEHdxm4 zpuGf@OMl0$aGcGGuH)+yz_kY7yyicE!*>HA5NJTaybs&j8zkbv84CtpNIF5h* 
z?frVjTDlH2IDYWqxJF>+xdOm#=4BQEOtU{}VV*1D*K-A?e?2#T@DG@Fe(=BP;0OPk zj(_mK>3p8QQ#l5FcdWxLC=Sa*{v+==;hc^0#T(`s?z#TK%f5I({NmlYbk)K=NH~_9Pl1Wn2tG^7Jq-z-f;lu*YgIZe?9-fS718%!T(AB;Qyq5@PE<= zqyqpK+ObqYzE~i9W%~Zo0ayXzr48gu_=PwB@E?5f<{$pWFJAV;Kl_76{_y{U_klWO zbMqg5Lk4jC#%MGPVAn_wl8j z{IktmzI8Y2=6rdD-)i8Z(JTw`EA9t>7L-ZPIQCojZ@7vA>;3~5((M0^D}3uc{s2ut zvcDupC4hA!zsaSBbo0zFxp49i{x6w-C>5;xciaZj?f;Ieed+iecb?~J{`v0Ifh5sz zzoIF^xf$4JMS(KSpzdd2bb;LbF?RG&0OzU>IOhSknD@^O{MP3Y+zvp&%}4c(2|mLq z+bPI6zzCoke#`eT{@<^>>P)lzGZCr)?ihCf7U<1?rrRo@Yg6}~hjewJ199zt!v3=n zn=9O(kO1UG?mI8hc9-}_4{ za$JFo)EN-kOn}!Xo^0euKyd%FlYTK=_mbfM^oBQO0sxCx#lTmDhM1$c+20Luf30Xbl=Kvv+DB`

%&DiCkr4Jk3c@d*LEM`(fPzzjHV&BK;v1t0|=-`F>ha*)CG0MP({@i!lUtYTaE zufzI;?kCTYkhK4f_X;qM)1Tg3!T0JAKX%v^;yDg{j{p*QPoso_(t&URgfK5w;J&dw z%x4vQLi!SiJ%H4Kq+&TpM+Vpq-t(Ak>bR`?x|Dt34;rD00+}qe1hz_hL zhV$hC%u6Jcga4IB1^I`6>%V`le}?o15FP>c@s$&#Up8R*{+Aq{@Vu=9-!H*S!r16n zeJ7;%OCO{MPNDMxykfb6^cH}9LjDSeMdABW0N+pkF3+L@JCwTx*K>FSXwLs!yZ^N3 zm;Wza$uGKs4YorEpRYiIat8BPUcd5s^;5qMSOUm7DBwOs>AOCEfWr5u6_@8{` z_J^J+fVIAQ>HO%~e_F5L{jOku`aM8-=i6WE^&x#p#iEd(_^a;$eEx>|rohelQU~p{ z|5bkvq;EM4=D&P@$_mVX+a+;+?Ami!1Jdbl?Dk{de|fL*mwLGkKL4kB{N+bPE2M*%a?_4F5=;7{$$F_3Qw9?nU>cn-+F{(u4c2Jwxb_L1R#A}V`Ad;9T=+n@FM1MlN? zzxQbVtkQjd_JxP~`j3As{cHV^``%0bUBZKR@Isda?E{+R29LN~a5s)c z`+_d;+pm2^JpUq*goPWx89;^iU~n-|Vf-zwgR95W?@M(Y^UIDDU~B+|aVp?lz|gKN z2d>u5z=rF8>^upFlq`UusQ@cN`MVn=q61#V>W~=UVJ*mi0I#JH6{z0>Uyr z!E+4a42dm(1Y-em7Elb(I(!LC7(;hOqjb9^8d(e-VA@XBd!1(rR#&FG9P8x(aGVf8P( z)AAiLlTz~dM z$Z%cM{%`-wW0;=l@?ZbUGOguuyvzUc-~O-v{V}|j|Lu|H<^TCB^=$g%U$oRO@BfFF zzGkm~oSMhKeqr-+^yR-aOWHU2&y%fes(Ses5=H-Yp7MWLr~bbd>5~2n1R~q;c;Nq< zrE^J<|7(HXB%}IS1O^<<0Px*(0m9pN8(#MxEsnBJ0IJGSqUxFRte|k8J|HT#Ev8yh?GxLf zySH?9K|%X%Yt>!Nnmd&r9pzjbY{Tzvek7Z)DF@ zms=S#=K^LxXcI4~$~DPE>zOCK$haQ+!_04NGibUZh%?xK?o%Zw z8n6pSWRTmq(Dd>lmN0sz=qlWVs8b3uc3Q)%3q|$3V1GHxl%ljsfJDsH8!e~Hj$9Y# z$d)*N6K49pUXvUd>YCNwYmB5$2pB+-d;-7kKhDG9fS+Gpe9M4jXu+AgK9EnFU2Y7A zQ+cFb2=;7%&t+_4U+z_yq4uMH*4del_-X?sd!g8hbHIIjZxvt5+qPP(T(J~t92SQk zTN)1Rh*oZ2DPg7Y1Mo?pks!whk;m0ncmE7hKZ`{CTIx?$?*!nVwmhrG@o9Eh{k#x_ zpdFq_2XXTFQy`Hlxk?!&X1GD_Ev~#oeHlq#Dv`Tv*dMLt>GRXSmLe~I;=TS{qJ*Yt zJwot)DR0y>_>{SPzMh}Cx?Fhf;ETh?u-4u>g`XY7w!@s{R%58Em{L5IAl1`)yh(8J zy6|)9?8W|TL^^kGIN*kvAI)&!Y&`W z@ffh>nGiefo10Op+3Q(f^Qvccbsx0dJ7SWTV}<1SIuxsd_Zd}5O^fA`N?(y^)LkW{ zx1Cbo5vNK8ag&pz@uRETTG&M)sbapIW;H3N>q#}Hb3&A2ocgjL|?&K$B z-bJA;lFsxVwwhjlabcARZ>kb^r1(}5-3FmH@mfG zbDa6qr)bVQevFRXoL+)9QQJm6xKN|H>)j1ZeR6dI(N0WJ*$}17Zf${SE#gb?3-_)z zA#lefKREo9bj^LagWrVDm#h)vjo?Yvs4I{WWQ4!gnQ zsD0mWMTPlDGL-HV3FWeJ`d9C0BV^ zJi4o_jE+jq#Dl{Z@)BD+$_UwW*uW|y4XG;K##jz1C#%|Wml@#RT`4!MQJ=inVrk>9 z50h$F2j8cEq#$U&O;Dgew?Z91i7i%xwRTE^vB@ z%J~$^EqX3T!3v+QWR8`x1JYi*$Dow4 zDB3Kxjmn5I3k#f^#;A5n1(@2h61&sn$(iff7-mLK6ALTe{M;IX3dn2++U^JgY zSjl{Un(yq2IZffe3dU6IjWf}wPT<}J7hL)3+akX>@u&&jY6;?1Y&5Eum&lBzpmet> z#cLUaE=KWOC4j`t;;GCnP3~&W;kM(t6En znn^P@&rESTzrsme07oBW!gviskJOx(_qdtTMKCS$t`B$VQ(~`O88oF-sxD3xqk7%( zwpIvsHDXXpwo<24ez{V8-3yh5^_Js?<@4d5v<74P1G7hgp6#f;$5!G&Q|O`9Cv|##fFjF zqy_j{N`WX8dygLYL6AC;c!+9ixYA%v zTQU^C{zyV}Z#hn-Zun!=2V?ZkLLv}nwaCaICBj1Q*jbGOn&EFgQX9p{4+||r)6tV% zo$&M$p837AG_futU5_V6?27n*PAGkfwWH~sr!=Nay@q|<%*~<(F4mWQ!r{{=iLv{< zq!S(%&!VL`3FB*a&TdJ0Xv=}z4_@dAPAJ1Q$R~m6@A_NsSxU#b5x0-|5iolN_zQ9} z1|d07b#or1DxW~+@1WsF_RLSX5LKLh2ifW0Bd`w0 zyWs>UtPWHu%VuBWCVcyUl&_spJWvg*Y`icCZ3P^5w>G|$1)&tlNC~QXy6bhvT|m?L zr2CYvlyF;kE4A$HLXI&5xY_Hw^q#XD?Q z)bZfnqOB|Mkn{B9nixY~G0pW9ko_>=-Qlt4hXz43B5z<}_ZZ86-0O`ljQZ_9zEb^K zD`_{pUxapDxc=SdF%zMDFn3tr8J|6Hub?8uJ?VogO41f}W3@w_RbuuDJ*JwcI)fnv zWB4BMW{!m(oys?d?APeh$phSKov!syWQdcV!;F>edYX41WyOrs)3{o&1mIy2c6 z+EZ>k6#$+ig-!YP;+~I=$%!_0qRlBR^HZT^yrfNv#EtxaX$^4*#2J`R@oB4C!1So| zybfMzR9>u6&UY8H#wTrtbitA^x z9%D+dUPg))ALheNcS7pwjEQ6&cf#h@43&mf<6^j-En&ij!r7%0)xh(%kHlSU;xk`g zV~@3|o`kG_tedFUTSDbyd?(3>VHqp-xLi$)Ys=svM12y^*i z`18?IIT#fjv(<=t)g1BEYEWbP$%mMUzJrcOwuU*Y&VC9_{IJ6`dHKA3+xJhjPy%1L z`M|!^kLL23{;(8U5pAj^4RT(L+>H31) zxASs;tdag)cpA?hp+LLW;ih%h@tuCGnwu`gJ50>*o~G{}Ji1e5KWP3i>5oBEBe+}( z7bq>)Jnz?eod%xv$)gQlgW&;U6UA(`LEV52V8TGxf;UK2xK-$|=&N8@DOS3-0<&G3 zOcbS$KcS|J&7n?l(OrGs{$8amvf zgir2Fia(1H^{H;CF8%P%mLttbOC)=zsSld)N0bvUO+E}!G@fPn2zjl$gd3BW6!*GB z=Aq$vL@QArZEwEXh&+60x5qiMk!PQIUkX{ zY9}Y^d`?_?1hMWr`h4w$8n))Jh)i|F(c(HD!|1BxmnLQQuQG1|F>^7#kNZ<-BAmOh 
[base85-encoded binary patch payload omitted — not human-readable]
z7Vs?l(BATr8SeBe`J63j;3WOwd9L$I#q9^lnNP*@Rp87K5BgqqcZ&l)E7rQg0V)=I zYGj=l7V5m0qnPFyg>`CP61ZC9ahs5a4}WDeJcGEo!97=E`Mbs1wuuLP3s##Gfy{vG z3|xm|S=&f}P5bUUwty|yk`t#Y>$K*HD+0g2T~F?na}RYS-WX7OLapn{V;4L)CFYcM z4p5QQ%9Ca9oOa?4b0cpZIc3lfoXmXGpaG28;HwexsOa(nALK;R_zZXiLO%fqcz+4T zZ6p5WtN5pjE6af03aX5RNj~RazViP3?%aQJ#h)DY{F4jpKlz1e{Qu;uzkd4fxlFnL zWAne{n(@E>ilx7C^fJ2tasK+}{P%C3Q@j{8?7#i%-2aW?Z*F3QPJfi&IBvaV;2p7U zs?pJ8(L8ZA?U_-|-Dd4w0@uy~`+p?Oq39zw#@O>5bOzC!a~BxL*|Y{xrOQXXp%E-D z=hOxF`B4cZ8z;G1VAxY{-D`jG=TE!z3!kWw?+U6 zoXM(u?Y2<+5(|6|{6D2h383#T8ruuc#1PHHfYJat@-cT^mRI)b zG`@vLH$|K~z}*mYQ4zs6`k{Z{m3(SSYYGjr4HVdcNVzYm;TIMO1lN&d2)JuPzVb^n z@@k$%MAYqm0E3tS#ykssyMMX$t_&lfs0&WuNw2eaY8P^e7XU@SWVaax*4mQ3Y>{pb zA8!7awMd?8{+BNL%Uni(>J*tSU;Gm*J*4>K$p_E-zUNf^FZf3{r_bjpn!`(P{@6xc zJ(JTob39G#~hZU_?uj98)^=pa5#!VX9>hqby(xukkhzdMaK!h&J?kP%Yb*n*28 zs3>Y^WVXCO4I2bfAZXV2sN5bXWTtbuY=EZ@f+@y9g+UlT&Fcu${=31O55l85LX4St zmGPV<2u3#M%J!;bmVYx_ms7_~Hk4 z(@iAi0SS1BfLcad?&q{V9-qV4-z_PZr9)kHrS^lt8XzN*dZQGUn}>E%?Z=AAiW*NcHh;u{DISdYSN1Gv%bI zMA*(K7OAbHnjzsDq$zJoE*UL~y`X+KMMSYEDGIps$+ke8Q4rtc z^f5$BLwyT6!52ehXJ-LNowK7Jln6n+Kv1SC5=`d34cr-ia7@5rfStt47nutOcBM)d zJ$2VF{eQ)ObWi7xr9Zarmu%s$vL8An^FF?8@Go1M6FC}qq~$R^%bT$ zjF{2P1~D!Z;I8u3G90Gw@G)IL&KU?kH!;g;7)^QWHawVaNX{Y`v{WzH1_@>$$sLsQ z6n{jz)E|vw39h~ezDJVXEGejqskGWg_bFotYM%tR{rmwTS-2?9`NKq`W0g~cwYRF< zokzg7HN$#;IpLQB4V0hgzoe&*4Y+rNg;k z!)|_?Z~i%0zjT)uGLy+){P&wLRNeX0=YMD4axZ<_v|l!p$o|lUum9nX4E<#f7w#`w zrNf_c(fZ-D@r1>qwu^)#oE6C+>wJqku6^c69_I4TbAC$4LZI7{H@758Q{p zuyhb9QF?*~)WE?s^B2^jOE7T=o`=}le3T3h4Ko%6_4E3$LJ>h+-WXy_yNO&1(SO6p zD}sd;Y#%q96xPKDZbD8I)b}S;P-@8tBTli^ZexNtCr!GfqBbyO2|lEEDl}$>DaX=2 zSfUC-bEJVNEko`W^M{MxpkCrUh~}vFG0)L}Q$*O#C4$W;$i6>}0^qbXVrQr%AowTZt^tC%wj3?YhWdJ5ZirO6>E`I7+!%SZp+oJ6g0|qiv+Izd>s{uH z-|KzC#KjMeLNEkiCl)br9VPFqu;ZgQIAq^G)Q=3yhTpc;;fvS4u!dA&Q-5E!-Am5> z@S-K2^2vSttc`C>>c@V2$ymip=10Prldz){Vd;WYK0e!Ikv!Qb^)VIF5R8I!S0Ez- zGV%=*-U%1ST@<8?hi+q_ee1~|9)=7Vxz;BL_@d9Zw2{aUfJDoBVv^K=LO38xYDF-% zicM}FGmnzOh_Ve#ePe(55G368M)?IU%VN@piMWzf_1CX)G(M}JW_#*bfeNOpSc zOW(X?Nmq};Bm12#_ALk0{A9y#J@e8%-XnVhL7zVTs~zk|<`_ulHt>u>V<%A{1jtJ1 z9<}Zb^nkM?lwM{C^2$IE8K}`mTud2SWH+DIA>vwy#AxZAss%LGEbqHh+<0>k;%1WZ z!yQyhG8_?vqZ>hW&wnxPOAJSfOpxqKavoMTEQK zogPFzN8Ev@P2Ce)zUM z{(`G?y62BI-0{(O9T98Ok1yVM;mf~#w(fc-JARc(uPf{Pw&555D4o>tuSh>wp=_8h zdntWl+<)MQ*?*Unrz>;+4!|kgT^zX2s$Vn;;_B37?oAK|sA$Mzp9~ zCyM=$XDiBG+X~%{(SS>l8)MLxw$LyQIMSOj(uK>+HVZM5V^3sDR4{`a(hCSmkjw@j zf+=}Cw{k!jj+-?`(&fyCMsNfv#e%uedK;5RTl7HhzJII;YUzbS)yFdJb5RAl6oILO z#G-!qsPY`aF~J$ZO}QjcBAW;4Mmr-LmkZL;&(5xCvYpAZxm%3ICeADqMHGQ_C}=Kj zM23ZAbWw)DU5Wzj=!poKL75uLHwYOuKw-Sofh{PaQjpMd6{d$w!3}h{%pI!qbB1)0 zAy~ix6@P-w4Y5a}E?hGDG=p*k`8FjYrMSs95!7V7rax;9eG_6Bnu&=i(jnRLA&cQ7#b*MivUBqcCPB zO2dJXfdr)pvpGQ`FlBv3ZS`ihk)`W+j%1`{CW4|AW|{5GrrcpciM?>GB}sb*3PQaB z1DfmGR7S*#M$@@i&NM+IEF$F>xLu|$nt${jVu9gCK?pGFzBY#iyL!!E;|=P!!*E`5 zO3gX($^`A` z=`<*|$8qY&N}r?Zr1U3<6r%-Q%zyLDinKFb3}PUtZm7O|n>HDNqFKlHRg5@8Chr?= ze%viuk(n(sA)neLA8E9qks-V2apzavxw8bl1ax&Dgi|FSqNjZLnSV9!I;1g@UpC}x zUJCZKO&1+@sChqShrh^zf7XE#IzOlL*@QEtpnQg8wJujK0oMS@l#RlTV1K(v#^W0n zE@pyJ94c8+JH(nka$~yI#qFSTS0`9|`OA`{MRn2PY$e6fgHY2U2aZJDfS{Q`_1P37 z6VLNoP`Ek>6;rAxNX`g?(S9~r>MC|B(je^9rB#Y;av06&9i|nItT``TlC#`t(!!A0 z(#$2MNA0@d#tkGh5YmGVuYWa2?+x6e!fQ>5G-WS1A&uZ^FZ{A8RowR*uT^W~^xFs3 zAK(6;qWyb3u)|+F{O%b$e8rI6;_q?7SM26nr+?!t&$hhmO>0k1)4L&=;kT8mqw~QV z95*-!+*nd4UhZnX^;0uUQySg;q|Zh!pStP`3-dTiF|T}vH48-TgMS=_dJ{3_Z<2@R z;ZR@NhipWEV+fT#PfdL>i#FI5m{0F>UJ-=Si;DfocC@h|b*(6k5l@AuC9bPiK5#Ew zATZbF+dXd4Wr19m5bL}fkEYHeBe;cYJj`8#ml!y;sSrx!IfSP+2BCu#4al48MXUP) zMF_;m08#B${r)5jGJgw-`U6Wf|FzN!ogbHc_BIGI`~d8MRVmzRaTuH}<;Q`-9c8Lw 
zZ9WQ~IKbaxCx4Htso$^l5r6vzf3dkbzd`BDZ=L$G*S;{*j^6#k5B`iFcX+6JZmOR9 z!ei5Kf6{Ba^}KYyFa@yRDQ6fpZm2K< zwi;1ow+OZYQ7Sy?vpkGA#@Yk5XmGG;;>p?PoM(qvtgd z)UkovrWiR$@*N099nzo>j|m1Acn1`whf#+kL@1i;L4{G^ZjACwJPN7aR6a!wZ269q zoT9}-)2v1)JZsQYaPNsgz-+w=__g^;H@GrQ4Nd#;4;5#;> zY&nG^MSsZUKk@jd9BQ(EiuamGbTPv(O#cm@?Nafc6GUg0XtctT`wBN!d18c*T#R?I z)AmDym$9GbLBvH64n$0Nz0+8wBhHaaD~}Z0xd~YS!gWzNO#-6DRP?PRU>Yq~;PpM- zU9hv&)_)^YTsp)Rv_7!lJdBC3;aYpN8D$JjkGOR==HmypLZt*v~xwV5E=i71ztm zKVmOk4omjJ(|>U8rw{Ow8=b6p#rrzB^W`Ug@#Tvr1=XxJd+D~U{jvi+;SRrjgQu(; zx__$OA{)9QE6@lkL+Jou$PcRx>!XoKU>NWzHb}4z=uVFs?TUns^*l`-C=XVyVsiAL z@m~JmnQcGa22P+yF6N}UyRZ<#k=Eez}`KsQ57$gmr{=ltAA2?tVO@oEf;? z>ZHrrYP?jiue=mxSE{@`3c3axCEt>x2p=i#a`FDOU4|ZZPrvf=)V0S;$C<2PFRY^D z_0Dhnr+fpVY_8w7=XZQLeagO0W`8o|;k0;Scgo*=>EY6y*f34kr|FU5EJkuR4oGT4 z=-#&db4X=`kW8t{O9KrfXTj%)!rL0;%50O>j!VOqsl&rOTZ>UZ4`PLasXSONGh=Oy z|HMRe7-^;s)7Z{=Y(0bze0=9Ydi5K*M`+vHSnC@Kn=7<~%_Gmj>BDM91b^|&Q9hp) z)VafJ*J(j8EVMpi*lFm}iM1juBC6*rhso$WfwSg9xTeKX!f8WZqXU8g4+v&@E=b&gfewR_4T9&^IDe6N*50MYXeou_Z9& zy2|qkVrn-v25$L~y8<`vWs!^F0)dfC_#C*)7!nNgOgB+QfiqnR0DnCe8hhL}ZoHik zaQB9?lL>eRQa(kBk;o_&<$++|bdvtTUW{5S839S>ri$l^vEZ162A2j3bi*k!74uVm z-B8&VQrQ>g6wuYHUFiHe?T;LdzsBhgnfw*!{K*UWW4^z2MYo1tw#qM@Kw&{0FBrdc zkjf?a(orhT_Eo={|9}2JT^MQI~&u?Z=|C?@Pi;M9 zJwkzF#4Wq8^5zn>j@-jsz2Qs_y@~Wfg+s4F;WoTdxD70L=bsb73#E^LV(DWHhnM=+kbiW0=q#5v9W* zRr#eU5mO9;eOixfO@LK|Ph&@_c%6$daNR1o1%>2#pm~T>ig|y%MxwrLkVcVoZSGrw zwbhYAIetm3}V9z%OgK#Dk$M1!6&X|L&UU}BCZ&)6DHl}p4$+s*^=+F$JU!Wbl&&;7b5h8Wdc{oh3r~oZ-iXviGSCIr z^=boUNPK=5e+OFV>vX`me4sRP_P1_)bkgabO+_x@#T*x@ zR@#4&h)gq?s`n=xT#P>HK{DX()N7@RM=;PvpA2fma+(?k>Jl8;2m`?|bqvV1D-!rj zmI~uQ7_($WrgxW&27CcJ!6S^CQY6bO9kQoA*U6Y4S^qoN>d$6Xk_1mua;;CL?42Tdmmied3D}f1 z^37fphsA>3`H|-;-ws4pQYlQ3y5(@GFF!ILB*#SLwhpd`$V$igIHL#!6^X7^K|_Bd z&>B}r+{N;M=|ts5@OrEVGamwRvm2K!lc>*ScBa8J(?c~b_Z<>b(?sbON!Mb0sOio0 zY~s>Koa-RnjYBT(_7OXKR0UQaa+ODi1^0AtO(PeGuu;&ZJb+voJoi%MJ{XahB+Z=T za&6`i2T?XdO_f3pYYJ6DL`=ex;Y^Qt~9`G?*4^`4f> z&pRkQOvRCW$-eQBzv;5S^6Y~TpFYeBL;JB|kcT_}L&>opnJ4Je-}s5M4^@9|2Q2v6 z5!AA9*xD=sE zkfKh>LR;C0ZCRtGOKl`<$&r6AHA_cYVoMj9oKYgUfko<(1_l8a_=x$zN27%yXkZpv z={8jFMRMmKranY&>u^di&k)4Z4@;;#Qs`TyW@ z3TuZj8=2rzkQ5=H1Vp5RlnBy(hDEe+ki;q_th1%svC+wIOdX8V&NhG4!`eeoY1u+y z3Q-6$#nA8>%c)1zdmc!f2?9g!QxW7PMXjVLul92S9&j2GDU{E*Cn$<3iF}0(8N%JM zM!NyeERshjvL70_8L4*z5>R=o$`*gVzqtpGzqS#2eQXd3I4EobjePR6chGC+A_&2+ z(Nx$s7-)3PqX817Nnw9?!=`5K#M!~Jat=~XNRWA&oLtFh%Ke5&nwH_B@vsgecJrwH z{wQW9-Fuw_v@S5|yMU(n zBw!6P1(sX4+I_y!g{s1z-7Dk(u3Bq<`BRUYH0otj?dPJ7PC>#IWF^1` z!sgQv=oKDH`mTRQ@z5W=rrNPygNuGXKB>azsuAvUlPlEmi+t@M^)(Ly^a`qZulKwk zA+SlW*XteBE=POu!(sqf0+yl%6el!ZF^rr$fFDJ7iH_bnaX>RBP$daEF?Nm<2e6eG zUA zu;8tF`^s)04+y`~BWS*a(u;N&OM<&&k|v=z-Y$`%lKstt!45KoMFRp%+{?kffo;Z{O>mwM!-tfdFGfrM-BH?7g4k~st-2dH(t zv^G$WYw>>}*nknlBCo+cCb6|iFg>u~ZYLPaNJlB#AV`8*)FkH#Hl(i~SInV^M4p~% zalUe(igc0J{8e9wm4}+L4an|37x4tEalY7Th$TLvG)1+jRb(0)5}#nh92j(Ukd-h7 zXqCj}rUrJbp&{u6AUYNj=)Jp_ZNTNt&8gA2yWf8-Z}}Yoq!V!Yz#k!wiX=XkX?&6p z-d0KIOVCleijOO1gi=>yc`R==L^Y$4sDU5Tgo_%sB+?6sS`XJC4m_`Ors_!GRzq69 z#+2ACDr>$ic!4Fxigu+Z=-vSDlOXZC-x_ogB)&vpl`{tzZ?VnTMM8LQT!@Y}@KY4$ zB0_(+JUSjz)-UrU#FFb?S_8crts%Kye$Sn=0n>57;_bavEXoH6I~!o?8q?T;IRH$v z6yFr9E1+}&D0RTJi-E+U6wiK+Njeb}3?{mI9R#UAwlYn3j%(-^yct#yF&05t6k?^YcP>34AlYbDcnFBmMttEdX zu1au(bW|PE4Fr4uqBw!55k2t2z14`-VLwP~9+ZJzmNoQ`Y$$Fua_g|Kr(xgwD%-HQ zENkSw%6VOL2zftF>wtw^&1+;!8s1hlAfna?ztTArF|Gr^R%A;EQpFF%icwl~3VeXo z_~D|eL{WHojo`x6nn>u+d7$_gk<@>A)ltOH>+wkv%`;X1g8`Os`aAD12mEf`0V)(9 zs=46?T6KIlzzqN_m=;QideRd(Bpe`5z!xuwT?A!*@ZMiWU2wWhR#T0^`*;?{^1 zI0S%P1i|@m&^ey6=keTh!UiFI*c_+3J* 
zZCI$Qbz3{w6*WR%>oQ5{ElRK}oGYL#P%@^x)mW=pizPLzZcJ;u5{h3~3-=oI)@W&- zgagU2gc_#Tngb5vAXRVE^8$bAHH_4FcuesJWg=B;U=yVVP@qa5RcBELoY@oX*iBt@ zb%kvM7*ziX$DxSf`c`ANt~nE`e->^VXuI;G<52o6SK}K!%iFd=MOR)q4mF?UYJ5dk zZWnEX!>;`9V3?};bT;tmU?ggk3r5}N98WskD?B1zO-d4dh-$Y{IyX@BuxL&Qo~e5GHZv9DBz zAl4ItRz_zrPmw`{a3&H>N#KZLGmy9F#%*x{!Krh<)B|*JAqRE#vVoE z0gdf~-fEK8j7Sl*|Gt0!N_M0RajQvIJ9##BHZW3iq{jK*>Inyc4-5&BckuB*`ih4H zBvuG_Mt2fAeaMbsda;AnGnKqf47Ta?Xz1bkAP@vmq~_HxS_x<^^-kY%r4x_OdyIwv z!ZqB8JxFY)a0Mf+-94}U`#lVgO{QfBWm_O6JGcaX1&6n_s11K*V<^3Rfg zb!S&t|HL!YI?;cpon!EW(vztX@C4HhYxUJ1?a|9yO=MjIBsIzoyy*0yZWVBU&F5iG z6yIpSXt2aec4H7h;hA0A26W_X;LULqufmdPwuClOdgVD+8-Q3BcSfYLH6;|k%GB6~ ziPBkLw1%|--HJ|~Ub=IkUK}{2t^4AwHT$kYjmZcx)&>53aD}{hS+pYn5P_CgFSI0*#dX)95*d&-Hiun)h@AF4NeHPYj7Q_sUwBGCVr?&iC^T$Dy{eM}F1u-&@0U z_ixsUS|7D2sU*-N1grn&+Fdk#_!p=L{o*X**T7 z(mQ#w`R9ykI`Av< z6hAK7wgo_jI*zb9)}O~=+X8=GFVykt`jg&N$MKW?dHke*9zW@y$8G!Q_PuKBTdj+_ zmdby}a8>_Tj^iQ#L=;_8v=31BYe!crI)JF7%N$4H1bRoe3>6)qC*7#7)&3Muy1k=( z6{>7FB)a3MJI)=4q1W}IJAPh2Jn4lxE}!(>@$pID9iN``-SJ1)4nS13V~th6C1RcE>H5WAbkF{yC!KuuKkDdi&;Ebw9o^>H{}-LIJ7H?KKXM~e_8F1ans&U? zsJWlxyX4AFHQ95Gs(U5~cl630UVPJQ%I5o_f30awzv;Jys$aYQk{!eD8R1=W)h4s! z>zO@4NYU@YCw)zsqJ8h`|E8_$n$vIEy=wd7n|9XK`+LopBUSgCJwedXtG1KTwWfdc z>TEDP>F<&k^{r-9)%{H?RJ#}7w4`gtH*M!n^?r|I-nD!B8W(JIb+@l^VcC=ZH6}&Z z?r&PFtNWX_QtkMmZQB;hr{7sax~Wy{UW*krqjc7a7k7z`L_!@c=kom>1`bsOBN z^E(F>PNj9oQK952#Vm13_3maFIJAN^4-!W(V0rpQn*{g9k_L<7;vFkEjt5W}Ui7>V zAkdGiBtEX{1}N~+q-vB*qC}~AbEw+??+AtlJFdzp#OhoZu#{*4h6|ZMXD|V_px(odU?ou>45{jAfu^vL=nf`_ zK%h_6u9ZZ`S3B;{{=&OD4<*V$=dj`^1$_#UM2A;Dq`KeX|5Sgiej|T%Jpr_TjZ+Io zsm4JAtU)AH`!m>bRIB!90wvw}3iMEPUHepf)RzDVRa-m?^}H>hlcL{pq^WUTgBt(Q z_jqmvG-}*^Q1`iD^Z(wwKj*XSf7hFgT-ATRg+%hXuK)I2_iy!L zsjmN9{_Xms^C!RltM*|~{TQfmTEEssD%5Y+uc4yfs`2RPp6%is1+V)wAeG{u>a|X! zI(O6-BIaM#pQ-rs)lT>|zj&(pZ2*<3otmmA>*T-`9I5F4kShwY%;AvtMVs|Mzut zDvs@c*z3h^`M<*AvFrbj?s|P|kGuc)P!$}XSLCh``R#u{Mw<3N>TC8Ne{Pqn_Rt-C zZJ4{&X2$m?TTq-S#n*-0dP?<2^j`!5zyHt)KnTu=IAsJ;hA(4UJzV4oR>FwB;Q>=` zit7ajQ+$kE5y%ZL$^+f3$&FcN#6K5SzL@s8J`du}8$o3_x}2|P<2+*XCLg|KBXU@I zkIm<}-^_o9tPNMa)EK<&dRvq4%#4LN;#rDW0kgaf51uX!N6;ZQCe`Yp{RzZ#%m6H89EP<4o;PZ2iwob$u_k z$87et%|mT_Ja3Zg?YzwkHVn-7b~qW-v+Z0r z;?3o_O^tP}J6PV|@-knokB8pdWVxG=!RJzLS5`4tbbZ&} zbuP{bRkkLlm*u;sEd)mXNK6C-=+ICN29DKpha)zw0N64+N|=Rj^K|k?KMsRH17k3@ z%h{vc(v9a^H5rii4Khq>gfm@FY-n{Y>KT8ze)r4IgFCm%lb=|tbj%lx^-15t58duZ zlXx>ruNQ9kKQ&7u;}l1r1h(~X50APx$Qr$QXibN(ebkB8<`W{ zLSS!n`@XWJ?hE`eJD%Zj7dAD~cu5glw-u&GdtcVxhQ5y-568xFbj_dN;Wn>*LquZ7?{*}F<;5y5aA}*K6jhUtCbADK9 z$CXB!HJdEul*G~7ExuH`D`}E$?Y(nZ0_OtR4$E6Z@=+APf;73ZL_k<<0)ES z!z1&P)FtH!hSkVAdFSS6JGq%^CQJ&9lS04+8`PP51-LZYLM^wKyD=EA(ldVx*SqW- z-p&S?#O_bzII;Tpf(|U8_PC+Pei~u(!apNxbrIS0M*7n&d@PNDASXCnKj)UST;@jD@@b9+>8eznfHR;BhE zL-|=I#Srt6Rj#-Af*QPI5?z0Lh|J$K!F2i=IF1%X@t#>xb6%6p!5rx4dQqJsW8}A! 
z{$!18|B{ehVrj;e_xXG7c#aVq{qk|G=0zHx^W#mleF^Kx_#D4o-wMC)TCL{YT`jJY zb5X}$L#9Q4J^f5BF!e4 z?XTCz)zJHxhGBF)uS&1^EvM((cc+e^MI z4Ow1ho5y82eP@5gc(W@(cPy7_kkQG$N-caecNcN776v(rnUOeUdJ=oQIy&4~7w_x$ zA(LJl^!xNF7{(P`K_C%5Rv?I6!Rani-I zdVGv-n1|)@b{3CaTIxyNA1CGL#;RZiebbK@$wz{Xakz`D)w{pfk2CwcU8j9n(`9+Q zRBX6Qdu4xr;k>7}*+)NLSL4CwdFYI86|O%A_nh7j@7ddB8}z+P5v?3&mlcoM@m5;P z%bBl^zWbhX<5s>+)5+#O80pPj4)-_YM|a;JVeQf6w_9|54E^(6M29k}-Ee6wd2sv~ zUd~YJ_~x%3mH)nsFQ>9k%3iuSwXWe$hKKv?wtj!J(#7uD_xNZxOpe)fGk8RDGrA=9 zTTDWh?e1si;oY3`n{Evqw63Pj{^K*6XudwF;>{+zXwbK|o2B8xB>h06vuS&4{=OK6 z%VY!hcrBaLczQ6r<0I@1ZzP=%YhCaS*`}l|$~EeH+<;8OAIXu&OeD>^=o4ufo>axZgycUTkAh;FD3YO*9NrG!Lr5thpN1Cghv*Oxx9b&<-c*M@$Bp ztVdTc%zTEDa)bTB1^XW1oAbpwBy^$|*5iM22;Q-1Qj)GbC$+ZDXpN5b)Ub=UWmx3P zusAQn;sd zmdPYIC)N8sm+#oe_kgz7rCEl{dOJullr88yzc1o^w6S46%{g(ioUE}8TiEaL`?!C; za!+0|dAyeoYc!z}KVCU%mF4(!ocTWLj^$8j^b>0ZIN7C!~U&kwoH7MAT{<8FT9;%cTJ zhc@@xsbx-)Hg#8ITWzoYV9>fLT*ZGzWW6$+lI?X<2XC5u?0T+gd75Om!AE4bdnbP1 z_S;!-@zGS1yX)Qb*luTj-g|BOF@8HuhxLBZ&lc%b<~cJ;U$f@~2it7^rcmr>3r@E-p`M>X&PpyP41t1=}sF(&aP-?`Ebp`9?%#50 ze;k_k>v^}Wi=nj}pGE`Sse8fx?q$<#+q+CJ%cxiP-=@uOlkNBO`#dqj0jmf5kHH|x zYIl7U{ewMNX$RQdiiV#Vy`;V4=7i(aJj_LqvFn|j8=H-( zzdqXb5!om!;zKpeJ{-86=c#|c+L6I!k*2Vow0tl-ZQZ%kcQ@L`okh#J7S82)Fa_wbBg4<3d<%vj@4a|5xvj?pdYky#Je=b()&``v8n1u+M^!df__255 z`TnDoY@cm>X*+7C}lKF-C?C7RkKNiO7NnwEnk+Mk7gc|RB>vGbT! z&KTgaNIg3_Jr??hbCwwrY4*0*#V02dd9)A1wcZ ze;i6<8D>+=hDls+`$IfzHjiR=JfC{{^uA~7t9hI4M|1xz-0**ru5&F`-@aNrf;=7e z_X~fQyAyL@*Bfhp9DVdg)0ITYeR#a&1H9^c^PWx~3-o?1FGF{ETD@O_P0@szKG?4| zE6t+$X!@|*!S!IEu_>m@_ue^*PFhqDYq7H9!Mm|@*wwr~R&R^!;miI?Yi`&S=_45R z=VwwufSIJj&3aXxo8j(avnL!lr2Ls0eW$LcyK?&)@E@qzm9vpRaW zSv8K5^LlRwY<#ue2I2Tvj!ZbZSkcc-`xXt2@eqs4a96azUQ&gy#oF)wF<9aa1L&TyJ3>i=){ z-elWRWoduVd>{t2LNIjtzVBxOfzt>fiJqs=g?2=obI*S>|C^PSYt^74279xGz%Vzw zF*CQ%QErBczkS4x1-kO|mHS8(Yjy*)bybz8pi%s zvvkfbL+mY-)@S!)ADuvWLM}}wl)HHC-tOY+9cO=@UF;j6df~)y#}}ks(Z!3+XqkfS zLx&zxH0CURV0cT=%gjxLM1%LLj-}U)n(V3^YWdEQ= zshqk#mm(r>9_>3yCj^TE=U8>IVjZOV*g|+QEzxFrRdk#>%I%u-6CpO>O@r_1q zsj2ALE1Dk_;L%@ViF5I%@QLSA`D|!X>@7Y&e0*`(4WG#C;z{LoMs%e1tSeh6Z2Mf3 zi6|Z~5@qO>JoJ@E`0_2?8S9Q*r{|0(WXozRzbwy!lJ!l+76%zkjNW-|42kh~j_6h_LKV zY8$@n!P^g?&(G2Mk8Ac2nqq1Qay{;gPR0y&As2dscx=S%I#IL`x9E@Jg7)r=*_KlY3(0=yEmp$}jc!e{EhqdJv(FvUw5 zItC$8?y?fq_K-L6wbUv5s_n~kH_@%0tG&uqy=s(lYcA}qP4L@#Ssh6v$N9D61KNrC z_V5VkA4KG@G*$(V;!dRULynUNJ{axpS=HM?Wiu_&W!gxBzv#0b)+2vU?fPDbI_#AW&bOv}Z#H4!h>ykownI&xh7 zNzSMT62hIF)Lri{HeV`<=Tpxj>f%=$XL8KWpEpTt10~O?b09pntJ5od&oA_@KiMH+ zKdT|-2(@0Px-rc?D=~jYewN&uRBBqklq#ubN&ocCe0$L4=u1(7kFHYQv|tY3tRrN>ir31DPR@KjK22LaADF_sf4-hC?CO+{ z={6vXmy-48MZT0~G!e<4)_HxecOo%9$7T1G%>Q8fB3&;SXYYSXx*J`59T^iZMm`}X z`rwhZ3=Ax49^-NGUdxUBv|9dnDH=U4b91E~>$5yRYvq>@<~7=cMmJQm4oXQgMsAC% zc7LuIn0?@q7tIIdyIe}*z}_PzSF zymo&qn~s)I98SuMAT&Xq3mTb5qd9lXs z>YMBZdH}NVOJw|_dKjja*mmv`G*dggwbH8*W1TKozCc`GoVNyMX2;xZQuZCw z4^!`>bn5(GuPj9?FI;!H?jcS1t{qfbO5j%NNx^@rG%vwNbC=%g({J_*!K^QHB1wOtjT-3&HCA<^bUK!&i+8gKWaGTF@t#dKJlWB6iAyTNw0d(j=i~ z3hK~e*j@x@Qa_Ma-=60_^f1l#)=bk#2Yz~p zHg%R*I7F>GL@7jj!HKZGV+jl;`FSsNai(-`u23$+B@eZP_x(eWl0Dp&d23@wNsxbW zW~g;Wrp_n5B+&{hYOEPgu0C-oKi3(TNT0l4Uf4*`Ua?HietcuYgwLdp-LsySu5V+q zjrsnvGz%GHCuS8c5)Pqb$cbXwe4bUxTBJ9Q%CKZoj^#I)#>)a3Ftn{;br9f~dCU9l z-LjCF#_^>Z-f`U$V8eJ$5slIY^lpFPQ1i{`i9{pzFoeE{Ih;j|PGy6*C^X$cMcy*% z7y$4OtS9kbxSr=<{~o9G{=_LC@K2nQaep=d{?8+nPPj8?px1tVpd=ffoOO zKmoXZcO`xk23_|lhFz4B8Fq$>rTKdi{TnidKM~JDzMtSu=Y1Z1H$YNm+FXAxW&aBd z%01Fw8xeo+R&+CByJY9D;VJBL;&v0syyIPp4_PdACl~Y^Zx+5^B~qC2MEnJX0sv_7 zPe7V?qtpd_0XgVVw+6>-%W(`4n7p$6j0A|A0dQz`nax z757KTb-mv5UYauZeBSdWa&v!b3QDL~eYiCzty4c~_uY4S-cLe$M+x5K2@*XopM5_n z&yTbXncA5stKS|yxby1nE-E9Sl 
zDtX128*`Z7|B|#sSL4{nijwgen_1Mf4%^^$^X<;foe)EDciZTry`q2jqnGH(v?F~< zld86*i2i20wTy;#CaHSwCL13kevna@y(6aZnNVPHB1ms9gWT)AH4@8~JkyX9@%Cn& zWJ?jVCT}l)tw~-4U71n?x0Br-?^pPd!ko?C&l%-5nM`E$#^YsyCQPiS z?)E?zlgw8J9kojDmMnjsxyM}I#$LbqZgsHAjf#{p90=7j>zj3Z8=bi~JMb#;=_PJW zhD6#4Du-(Oh_~p`Uk7h~t3lZ&ZGI;$Lm9(~ijN=~v59ezD@iu0NxO^gpnah|lbexy z-F2r|QZl^AeHU$v)K@`Iz0SI?&hQ?)0aX_2HYKizdh;;9Zd89;dR)UWO?xfzNr1=f zp;roD9MPBeE1E@m>Rj4zrh<%Vr+d26i{U~!2|pxfU}3$5H9g%*PDSG4ajf^T= zBDL1qsNJ|eYaw9*e1i$2|1LzLrkAhn}0Ya)LYx# z{Twd(0UH}goi<{P90uuOD87f;S4%|Ntdn2kq1h8T$_3l?_p4WR*+Cuhbc58oMybE% zQblq4Oc6}O?TU(F`ioFs{6&&0Z0{nD;hxlufnCA9N-|kvpqBmt^zaEy2c2YM%_8Hz zkQ;_Pr$&F|W-sNyXN?5JbcVwqac-ZZ9KT5xcbo2^uU2uU=yBDFTKRZB`iIV=WAkzZ zVO`C@9JKfrzk^o|ROyPokHOW{DLVc5P{2@jv8F;X9My2=NvJ&JDP7KL%)J<)B2Up7BE zEY(8=wTT)b(JD20RDd^Kd82a88UtCFGl9~|8iO~g@}!s$5@`%*aiYyS&FzFGP?Z*{i>IlMNd}T!70D*1*yUqL8lVAlRFqje!1L~Xyi#vz2Eq{-&{gNny@k) zoJtg<6~i{whMHaU)L*h35S+sC5BaC3+Xha zQ)Vk#2XX2b@EwEc>k+$+y19x&#WLxn3obt~fOY$geR@4YM0=*rVl+Cn88(wuye^sY z(yRF9Jj^#XWHLgj_aiNDPgSaDaV8h92nJ;?sN-hHe0T{E7GifV08a^s43g7HcA3cMi<%{fc)ss;$c8PepHuk;q{}K zx9O>AIpr+9EETuChxZu9$ulw#>!UCrH5N9yM{Yj!p175zzA3blQ1c`PmhOKE9ks5P zww-%TycI`7HQR*Gv!&tUM|VY+*j0dtV1N5J-tQ-xxbHh1r4L_+&k6%7Pi&Mo_$<4W za6&H-&)8+bTY@M_YCTx7_j*J@m=+Q+b00MBq^KUW2X75b@NC@DAo9yrt}dYIK}pB; z$=xTpxL`2UHSb{|#N7Iyo1cHtZ{_~Jz7L!4ts{<`w9|t}TQ#(ryQiu+v~{bT^wUg zNIZWn(N|TQ6-TmeQs6ul1TlFCBKGocIkQ~W+$32VaKIBYo+fIEI#_?^!YvG6lq>e~ zy&3sbFF)&157pf*PzLwuvi7r)N%<*>c*%?!Di{@o-q!6xk+5Ai)-ueoUwj18!ArRx z56ayI>YiV3g>F$Zz9kIet*?tF4lo^=Eq-Lw98P=#-lOmW7b%u1glZmjTRx1br&^wA ztw&}*DOCm!Bv&7(`+|RuMjfMgFBBbn^NHXatTr;Q=oV_dk4J!^LM(Ba$SxGMOI~&zWH)8lRi!|_LD%Ll; z?V!yJKOA;Q$l5Hx9`6h3hUqazw~45rx{V~&T#X>jDj`R$?t_2MqgQ*oWUDvUUfF?I z-fZv=UQSJt7dz-GsumI}SKq=2SHza=ULq0}W$wLq?0v5>iQOI-A3EV2 zU#0mHxN)(|A>Nv#_6drqzf8A-8ByFPJVUiITPm~aN^!D0vTrgkhrwF}3)PWjSRit* z@_sML*T+Z{z7%uHR?0HrJMA?V^oD(|$zEkK8DG{H(~DasOI=L)UmJhY3&nK->F z`%h-}TbC`>sta>ZPUxa@SRE^~BJSvi?r*d5S*Iem2?l4Z{ACpRgK-W6`IuuRFO;`D z<7YfyBO`e(G!jtJo@V!_%@?;EaV|IdwZX+16NPFVgP^rwLqQ|XEw)w$f`u3R`G&%Q zJU&5t)y04NRFRf}+a^vI(DY+N%~&z0SH&BNL3QzEFu4N0J2m7t zd~ulg+TGjMD~Ih$eA0Vs+v$rn#Se3TCFu)}a&UiBRY9ABY$%rI!kCke*vVQPno@5E zRxn-dnZdj7vkT`eo+S-YjP%-|^!MUc-fuI`z1Jtc6<(9}yFTp1UKoOHpewG76+~Vn zsqHJ%F!A4LV{K5E~uR3&&G|~&k7CP(d$%K?2zNoY$DLPE9?oa?xDOV=v8Es z2i-Vi!WzB$l(>;8}3#FJ&@PRdz*da{#J^WB&BJ$6A zBB6JKMe=!S(iE#awdqlJX}Wht=p)-g)npHgqQ2~vSJJ+RAa5NFI5EBgr*#U zL7k8A9*ncV@9XyAZKd7b=~S`Dz)6ZzO!)0;a=aVu7m*4L8C42`_(bv&qBqVLB_VDa{{E&q$8tHpCs|?%LV{FA- zJi|xD>-aOa9+0Y^lrn4!UY~1!rM;yD*QGL8Q=t`Ba{!>*;8@>9#*u*!0L*_^dHn5} z`4;Z_Dd2USg__FKkD+&9hT;okDffUf0xF8vp!NjMfggZzLT7|vwKc%Fq5{2uQ$-Pn z;f1RPMFv&OgH-iF4^gMUbmOVvSD~+OB0liPcdkf` z<2WG@R%iIPt|W&v(6=hd!18~8`FH*lx+<+#v^S!mf`)HJdE{tx6<8u zjF$Xd;s^$h`H;xT&bVaWB|?NqPp_jRv?aP-SbhZ>kXb7+@q)$ti+CeG@7=n#FusHp zd!@}1mZ;!J*fiASbDiRmBkz1z5bAZrHSvwp@$;E2pMZ7ic0Mw1A_9M}oib~1vO2t` zeamzU1=j7Me|=~iMf4WTWDW|;n{0Slf0gJ$SM1}%X+Vs8D6D*0fNc8JzD+zr3Z`-d zQ~A{podZ@&5Gb#&Ob+g5D?;GjWUejwgJ(*2X*t@zyjnn+d_9bT_(6?3Y-H6=hb>5D zVQ2|kzTwihY71St_UV5jE3H?1mbV1G)~j5~b;AbxA*sQICsGN3vGKu+zX?ufj_wf& zMq%==-ZJl(CX)&`)b^JhefFW09$F|qbU%7%QTTA^9PAUSN^Ew&1=29a{ehx~xdJ76 z10|AGWx#3rTzPZLdXi@<9Y4Btdl-7HUraMhw ztF7GLm*^6PuYu2Yd&Fz%6OitrOhSHVt400X+{cBylKO~(%6qtUQeAIgcDv_t>G5!d zhXL$DaNV~Ew)}s(a4xr0L+f3GdrB058r zG;Tg0%;K3bypta(CANN&XX-xa5oQx{8wfWo9Y<6U1{{AL*xle;lfc`^nJb~6vOBjq zH|U`GIvQbeEWtLkL|oIcW1)N{kSUT)&e7*1ATCoEzpX`)?TEBEAU^xRi>!`*U%_R2 zAp3T~-W?)x)5Zx6bxF$(~^F{_~n!^cSF+&tXdEzGw%qZZ;)&&) ziZ&#rMRb3v0V?s3L0w~F;=0@fdgl&IrCHwt>{M3mHntoW#hD+?Li#VTxxW3Vb=pt+ zK_f9GZ-F;tpKMMJi~6`XLfW=QZ|k~l_E%y`^CDtKTIH{&7tDgOf&t)U#*VM2=+jKd 
zp<^iY%~d6}tVgGpj$@jxyLnn(p}Gx|4ufk}uSI`o3Q=3@BWxjXppvWucV7nrBRVCL zJs1&B>-o_gPLA{^M|70U#H&_zS_$jY;H}}#bg|YJ;#&B)%IL!s7`I*0&4kl!VSnhM zC4trl#PzAE>(^+JhT zXn23R?<&n8##s^Sil~-A#QYpyocEE@m1GV18k&Wj`IW-&AB$a(p2f{u^zID0qMPm` zzN{697bK8STp#tACD0?)hf~~ zS8Q&Pf&`OZ=J&8O#9h(5rS)|wrR!eq$lHJL?$V~1kg9zt0k`7HiyCTC%m=snuor9t z!O(04PK2`FyT|Yj{2Z~+yfQf~3fopn^3@yPX4ICNammPsrak9qdU#&iH^XcE>}^*Y z#=L)CDfw`;3LCHV)ImL-GL=OQH4!J(mE{G61foN`mq*2WqED~Ci_Gn%A{J^k<^X?V zhj;~0!>z*9XOn3Prl~{@cYKa9_@qwJ-enAV6c0KMDc`wdjH)oxYaOfoIFH?skr@dA)o4dUOggjC9}wqDUNX!qE@HF zddgO)oB0mk{LZ*BE!iJDm+@N0lWe?|nYwM461kHEmU7gN~H9hvTciBOTy}>x9Ki>6T z>7>u*Vng3pVkb5A%F;t^+;4cLI;3E`GmV!buH6LQ=*e0@@TCuCK?o;K%?3ihmIAGp zvppw?PCux$#A7k~)G)b3tBc5u5?PZYn`Io zd^GIFb4If9E&E59xSxyRJ9kvKPlu*d%NGrMJMYcdQa7*cSN9R>r$T!Q!hs5oIJD zFQTWo$O}zkuEJ^+e1>E|a%ZM8p)J%c!rPvWDNwFiKYDTu4~~IS?1fk#m%rZn4HCFHh1E2m$S?KFnQANy?dSw zQSI|m-9KYaQe!@PJ?71Wj&I5Qo$L2?nfRG__}VR9@IKF?bb8m~)iW}G_T8EY8T9TI zlda;Zp@Qpu9{nYg4=;I|ZhrUP)Wse?hj{o;>@u#mCYz3D-qmqANa}@Y0ZG_C(q0}u z^%UN8%^j5s@oADT&Vvk;9px@G3ISWO?u9~tRxZGFlGdA=M@BYdd$0(+AgIf%$lY8i zWN_~lraH@8;$F%1cIn=KAAUJl)JFZM?;*nbe!C@7N3+*EZ9C3&d|z4MCpQpE6wx~? zIZCVA)Q!4ZZGyP+_QDE*Tg}}KbNSxftyW!cqBIz_M{BDofTQqldm)&sE$mntZNn8b z0J9ZOgw^mdJczl~S||Vq;KOr%`8H`^hKaYDf=W$-N2+Q7RMBdGjrCUI5Va5*-Xc=n zY+)zzSQ1iM+6wUC);G8Q%tzhtURdh8UMQ_slga|j__|5u3}&_{j&;GAX<+Rywc-AKvkyCO#*X|Ko+Fq&`VSugO$YyH@k zUys_JsrTnLt^7h<;Oo=93Nyv`k?^QQmX?QKV*QqsnS2$Gx(-=_m!j0%f&@`|;gB+NTrn@3&l0m5Y(%rB6Z!B{U*wr;gCs z!zoLVdDntD8Ke>CTC`ssPI)<>`((TlS$g~Ys{#^$N$LM$*2my)d#{kF>WOrUZ+CAb@_SFDY*|)rVJpD{!S^HK;3k^x24)njbbNG`K&n%wmwP0xgsANhY}Xq*`t=iyI@8*WZx z{E@H{a=K3ReEn8FFJ$((Ywd>7<5>ph%;?~%=b4egeKRls0I#o5+Il9Td`lhd-_kgL z=2?gVn45TresPGtxj%aMELazz<+y`f1iKfZZ%job_`PCK9N&&zH?Rj0(J+FJNe!~!aF;ppj zq5?z@Jin-%FOv9#O-k3o#Vj7E`sYo5=>S4Y@VnxLE;2f@^%phDxLgUO%LYb?2WG2| z)DME|eU|#Syq`=Vk@!KffZsSq0sy7v!5V?;l5-%nco*ZqEfhRB#sHlG@2(aPP4Opx zN6=5Xn^PZW0SeCa%q?uiz>M~hPkV=MIh`30S{zP0wHCSsa1g??bUXmuFVxz9Sw5ow zg_{8Y4(B~(&bj0#s)UD>%mMj?`C5b>ZY9-j5pR$V!$fCDGJ2Ra%- z`QXIdf|M6vsv6>IaIPpT`-z=qbG24a+NTlltBSrmncuKkY~)(z?{6Nei|SG$2U1gT zQ;`EyZLDkA&}u0OV#z^K7HX9jc&ZTIe9kqn(oRG{17>+!<(>w%!c!)HsMUYy>3omP zFTUpo_gW+p7ydkBo&FuMaX$ZV{PsUE&VQlPf9v-b8hQrJiRWO>= z?xq`O)l6|P|L8|nI|Za1*g{s>T=}HciH(*E>sDyoS2tk-&$bX1fQ#H$Kfc`j=tty~ z16p7ceXwGb^3{!4iw$*u=hxI0Tvg<1x0JX}?q8#z>^kqUh9@p)AcT(vWU@x*Ul-E}a*nsUGwBez{oT-0FiuEe*~`Hl%3 z6#1gY8r2nLX&XckxS9Z61wgeF4pZVb^~M1a<4?O=RRAi|08IgSU<@aE006P!c=N=w z{=7q5DoUq~obow;*AKT`FJF{VZTh}1TVNGq4t6pJL|BAOnB&aw2^cZBKg*B_oxePVA}$ z8h5kzP~w9>&(5h{#5qw!>V;uS@PyU$ykxUVM1U^LcbIK-h;K^z_& zzw(B%{{5ccQbG1<3X{l3XGtsoIO|s%Nm~(p@x4v)6PkEXV#66dW?fgg1j3w7OpH7#ZDE6^1#xAtwkL4)&0PISY@ zavNfTawPD>g@f>|uLp8lrI8oXn)5_XUqeHd#@}*tr`$34%KbRo>+A!6VFk~zF8Mt^ z{OIe%52nAz6a1%tEVzHj$nl*d^GzoR9`kzQK;?kvKKHH?T=T_uk#Ggli1wqYx1G%M z!$$yrax{HX)$&zIbj1(nj%iGr|9rOE`XaR|l!F!pX6h{c2RJWH%q^^~!2HUrdSW2e z#Uc43Zl1Dl;wf)y`C_Dg`*712#MMSe)%I#2pgqR@{TOVVEI^AB8u;Qa$il=a`sW-xz~MMS=` ztRmN~rt3k9b09V7ZkhjkS(Z~y2N0^Fe?M0;R`~^P@Wt1K4r(`)dJ(mVbCZ&P ztb$b%ft!lx%X{SlH~Y|gJ0!U95F3O8I_RTC;B3OWS0RCLJm_PU~&kdz| zmATrDB=!7LrNZOr=xW_>1yQqDGm70Pwu6zSe6KH3NMgW4JxoCJ&TZ$hHaQGcRtYGD}b``DG z#m+wI|B;M$N)%m20x954&1F3fes`g54G$)$R2EooC_UzrDt<_wH(g zUKO;n#3BIk-!O>EiA?*ih~qDRDvvPFIfduSe~@dxkYj(a$N$7OO6FVP;E6XC_|q-C_$vCJ&uTRDk6ga?mQgia@FkPrY$k%uz*B4cRyRUd|DV&%t zSLJx9n!HQ(rimbM9HZ`B;6PDucqUdjEL7mifmP)=7n&A_KkCmed;}kV>JURDZ;a0p zcV9hRu^&CC=XxdG|C+xR;h%X!`>%*ovpwfpr_R#mFYIuE{hCYs3vTc?B2E+3zYubp z8~+1o{QKvL&HO1OAMLgF6$GS-MdY8^;aZMh$;W^2@XX%ZB^okAqU^#V|OXoan z1tdCBl6<1SL=)7jVEkWwA9(lwx3B+K?Q7-k`_DZ&U>Y0;5)o2=slbVjP=g!CJyhAF 
zcwr3}r(XZuSJ=eZ;9OI12V3M^U5Ub*(!o;D)Zcb2&0ekpUidA+0oBr=--S`*E zvOqvvoOsOt@O?Oc-=$x^kS{*^#BCPmI_Yn=<_6YzeB!_B;_K_OwgNlX1Csk2)p_18 z`}Z~p7=d$L3xsCE19%J9&=F4zuREx~`(N5Zz(bh0TcPsYE8twl+cVee^Ao@ z(hm6D%b=>MU6-o9nufc`pY`r}cuCZQ`A7nl{<1aRa#cYPG0f=PfP-$4k`GqL3YP<^ zC}WK!xuGI-!(a36wL9nC`MJaHcY6WhHzrk0e%k5VE*t=boO)_{K9>trL-tY&yfrrj zYASl6gU$heLc`QS>cQinse+3uNS-R9t@FIVK@`0%0Q-j_-6HsPf92Qxzw5tW{jI`5 z^)I|CG-vQ*6JMDW=q6UJo;F8-iScAtb3x=#qN3z3IPJsYSCu2YP68VxpLKp+&>?&_>`TV!>v;<<sbt3@)J?4}yU#p>I&f75%#^{y1z9Q@5uyhzxLRaC zOe+WSeA_|EEmsj8fl@vmh71N>em@RwmCjm!&hohy`R!L;PyZtb|MDvrAM!wd`IO1- z^k>KCdBR`koqpd|^m0-M=393rk2~ji|Mq&~Ar-^$b3OE0`taXf?>w)y%5Y%OtHG(& zHT<6Uvt2e_fBZRs7r%ZSn;-x3-^dCwtg?QtuYUYD-*r_8Rx=RmpY?!MAH?o| zcP&*>Jg8RD7u=MGPZovHlKD+k&{;t!=L*qTR77`FfNydNgq#A!_O#caqFa!pLI}Tc zwBS4DZ>cZV?z#5+S-Ws%09MTcwP|v$&Q~PD0~p%pc2=lzD8PUZ+)+U%5}{y=P|Nks zt)s`SV)ZK@wfdp~zUySSJniaRdj1=K3HZhl;$Qgfu=()IDb4z9{ANsrc$!KV5eJs;3VS zT;RAT{Qz*^_wDDMi+}o%KR)yy-|e3d+m_h*h@7e8~D6QAw>6}S0?g8xr#jq}{@BQ^CSSZSeuR((Nud=O>u z1xUG?cgD?JTmmnssRz?Q$`<)BJTcHVy7;0aHiGbY9g+aT0w3)ENWT!{I8S8wpL^&o zioYP`i*UuWyZaLPXOw{|G=OiS>R|uv?*THT=G9kS-2)g8ATIJ>zce7xwGepMm;pb6 z@c(F>NV#AAP9z29@3v}x)$OOv)Ls4Q1N>iWBcUM8Bg9g$di*(;g$PJ$5Z&>R5}?Wp z-Uv@W6934Ibgw&&Akq_2&aL8AsJo1Z(S@sF%y)nUu9*rem4Hh$gPpfC6m z>JW^nz^|4As7}~_fLn5sDMB}hxuBB}Sfm`AsvN9UUQ<=VZ3o^dG!AwV@2i>*9|vEm zdgy*2j}J!k2d#bXFyQq4H5UIZO1xN*0fy-r-~EqJ;>7@Zpy+@%c^3i_+(fWLjR!)? z4K9d5SAd(VDJN3ldwz4E?5d7mxg62}Kwxp#)K9-$f1)LSULz=gsRN-CcnB`wdX}U( z5v+x%o_u1PBe+D{_~6Hu)l~v7_Wc3ZYXC`b~Ymq-W)U`dcYI8 z?t_Se0Ps!%&3};2#lMPwtUqXSu>R~ckE`Ku49Yw)k}we>YL*S`OK&RzmP1(%H=>ALFMwN zUGpR`d6i4+&X)uAF4owtI%+*M>aIT2W^7j8R87WTpA83mr zHcA|7FFwqH&GbIjdW*Ah-h_q6WuXI1@XD=GBYRWmkoTD03jLK-GbaKl~X|p6a7j;;FKE=uGOzW&9(1kT|4A1Gz?0D?W(OR&L zNh+mD_cWy6#5w1Q)kzf+bJN8$!?z+Mdt;zyjpm?xAAYmr-N_dm z^NT#b`9)SCDBpsMFC9PA5o?{lW1CnKtS^~<(~7|)^*^k*&VQNldz+^pdF>M3%L>?k z5K8L(1VT~H+|AB1yYa(qV}aike@OO#Z)WDpw;#XPC#g5beUs}y%kB4acRv46iHgUU z#XbKQ->rPfb~gHw>v~7U;mv*hqB~hmzZq^PP$|kG?qzRI{$K@Y-NhfWJ#!EN<0!iR zA>9j}667cd?DFQ#6!>o(xZm8wcW~!_<;@h+&R*Q4Jk9v!&56^HZUrhO-lXWwuYYOe z_=o(aZ%XsdQ%aj8!8vF|W^R7{j-?wv6o_AJ{rA(@DS_rgg5s6x=21&llKP^WZeo}F zMUX~zAkdb8@J`?tb>Q%kq|%fJS2z6_OEFFT#mnEMd@|2e%PPcOM1K?M#XM7g%}qZ3 zaO9n^-)}*EnP-YGX6mLIDOtT4P3J$ey}xO5raDi?!YB*!8Kn}w&szK{U+PUn{&QVm zncoEbmyM;Yn}iLh!Vdd7bm&G>sp99HQSr>)geFx81Wg70f8f=f|GdtbvG4Uz-}U^- zqi&~8uMRz%`^6#~-}-;E!8@dXQIyW#`yrfK88}}G+Pvv*stNW;;5jv>EcLtno2(^& zNOry`X6jj6ZMSoru6)ZO!|WXMQ+_yS|BK%~nVVIvf7o-L4H-tciBQhb8R1Fz0*8vZ;~&o5OE{*`9W<(7sgoxjNs&F-8B z^FzOns>;bf>DYXez*ImjGxMBp4p|VW;(KihX!hsYOVOc9tv}Q_HP6g*nwjU_ejVTT z>q|-hhjPZ~rNL-_-)s5*kaUlK*kpQ^JeP* z;e3|Dc?AMzSH>LjG9`Ucscz-e5DkVse z@)z-bGJAOUG4AWCUtPB@CeiCfGa)auE%S5T9PVp#{hJ`SJLdmm^7*Ek?>oetbe=*wH^DZ~0^S2n4Qtr0$|TL z_I*9}-a8NJp;s0e^$#rfA*Yo6J8sw%25f9lJfH~;PN++|Ij zsQqq#BDlaGtIH?vMO!9slp@uuc4j zx^mZ#lToVf?uXlKC|LDO`D_?``;eT4aPjv$fA=5%eftn?)u{2?EJUidiS$! 
z&*RR=u2=msbNzple)nT2nx~CjKkx7RJhRQswd>z@v+?S0JKgtn=vIIB&#lg#x%&?s zbeBi-hbj;6Y3qk7|4!RKRQY$>`Ju|c)9z1va!-3d@!x6xC;mGf{KS8!U%zPcABz2d z7j6DStpBFXzr_0c7j6C%_lq|FiTh2Pf8)|$wD}L&{);yMiF?j-K~eVIhZCrKn*N#Q z=DFa0&6n=!U-PwZyz^zzt#AC#wD*nwnGV14Khwz%9sfhI_dj&}FN*z#j{ilr_dj&} zPu%o#{J$vnpX2{Uu|q${|3kK~Kga)nEAIRu+5gsV?0;DHFKf=tzex5kgPv61I{J%o z|5E7JAC~P|a$nbwy_WKy6@Hfl;&6o20O}O8A!bpEu_HS(Sn`QsTR@HC9{Tr*Q z>U~AJ|L?4Jx?E41l33d>YU%zT&uzx5p#>i(@tE1f{SUtPhYnfi0h z+h>ast%;*3<`>IuQOYl_{h+9SFV8Mo(|3?v_1Ap-`y=By;@|tSoO8(3ms;Pg-vqm0 zbN5)~H`l&W1y%9YuUPwj&-3#`w)av&Q8djlQV83B(e3_Cvv&tY{cl+I|BhM@|CL&& z{!Xoj)KUDMS`Y7sIsbuL*NVSW>;I6hQ(Z`>)IU<|$p5dX^@hrB%ADnY{vBy9-1`&X z&qe(Z(&wH2rKVZwri}M{JI0ro{#P>nZPAbWyLRW#53&8BM30JZs#}!)9icr?Vt=py z_`k&Z|0UM{bNRUI%fFE1f0fst{NE@cyvqantNeV|`@Z*sq3&`x{G6A2``^#zo}Yft z&v(1$_xqP{_kPHBuA$t2?|y&t|4X0$^LE{TS?|wz_lqC0{ij`c{;|?{WP@ehb{sesSDAi2ToT_j4&!|9^%?|CYCVJ>UAj-Rnv2 z^8QmE_wV}eawBR#6g-3dX+PARHt+N9{%u4w5Yv7Ji~q0wes0!((%<`>`(5Ss`+hAz zvCsPj@r)8EnksHby}6$ait{Nur1_l-DZ8PCf{Q78r$vj)DEld<0X}FfO>0yN$6a&Z%{-ZxJ;Z|Dh6uTY z#drRxu^d7p{F!urB$~b`Ief`yjHXSDrYqC^%#Q?lPG6MvJ1y*vZ~71UE}fc7{cNw6 zPHq3S-dckHq&M21besB}F7v6gZFf6}er z#|2sYuXNOQi(ly}Z~R)nZ&k6(e zW}#fxh&y_Ju*SxKUP9#Lc+e+!(3_YjNC~;Y#|E*J=F7tSgqkXi(P86{0)DoL3((e&N@()>HXmvP%lm&v&2lc}DZ?&|pSp+QRtZ4XC502*N52 zmh>ZkUA>pwL!wkssbE=4yh-IE*`F}w+i4ce5F@LH@jhQ~T7h){tBeS&l~Qgb4%Iuk zpq%54>E`$1VAI%x>Oc~=QJPQLojaAGA8D;dS z9eJI)vy>}=2|`;V`KB$PS92o{D&tx754T~i*nK(y+#%G{pmU6!BV(%g^@7)V#5TDH zn&&K_hhG;&w$YT;9Hc_(!;wKlLN7-6*u3fUkSl|eY0q$)hYOMNV{H5Zd(vSAkho)i z%u_Z~`K9pnUWqeURQ}p^@0y(616((`Zz~z)e;+$8K?Oc!NajZqqN> z?_5Sh{4&}SE1YbIqGn9gB$UdFaO~!XJojEVxauZ>vvnLKD40?ZXleK;ctJ~e{;9OQ zm5bIPxhGY@yP_Jq5M*S%*6~Y7R=kjZTc~pigZE#Pjd|SXa#$>~*yerk0)k=B)Eii^ zNnk$iQAt(PMvOGHU+5k~81m$rIIRzA>T7ymc0hUaQ@6VrQW@FtqH-&oE{OGK8xCVD z5v@-{SOo!czM^!8nwdf$c%Sr4;S>6>e0dQ7f?+AP9=Pemj|qSxnet?=X&<$J^01A} zegscKvXZjhkV4%dCLl~3vWmz{fc8TXGcKK)p}eS47|f*~R1AKv`t&%MGUQ>AR63Qd z#Xu-|?xmV;Ntx)JBqPe{Ny?QJnz~;50ytknR!fr_H%@HBi{3sq!3E4nYq{pqTKB7W zo@a-LC=7mT9$Ju4K6$wwMRRa}5W4g-=6)H#QeTjDRca!4twz^4T)LgK*k;)kpr7$- zwsgbnJE2+Cweom$C3jGK1?g^FN7vGLR~GJq{kBssR8oFi)|VR!Hr2u+y$eGIG-WD* zRFNJkFfMOFe#wnJ*XU=?9P?3FUSPAQg$%3rf!;h2xPQ9bc}$Ppr1w_P{4jj6Pws7gNAIp0TC z;pKTUYaMSvwDyO{o5MG$_yda=s*kBMTN+LJ6T;uAw z!{U@6h!qTNJ4>#abgHMKbr8hpwCq+|HG*4Nu7mSAu`Wr~=~s1G`8bJoOof2_er2qi zQZw%<`_-*=bgaSYxKl=;uBFY-N#o=X^2BX>HBOd($)p%OoX(Fdq|Ibr;5m9)2V__5 z(vcY~Wr$BkFd;;LLO9EuR4LGn;;d_1niMr=Y46ZbA z2U27Q7?`^orspFD>a$ZT`wwkjA(;U4vx#Ak6~;&cj2Ll0pH!Q|art=mBzsW7#W7LZ zL!>4;;XQD?L|UuzpgxA;FUVIEI;s1^n7d-34vK~BKuqsbKbc*KG z&izmY$Ei?C_cUE0kS5vAew@#)6#TgBTS62u-K@udSMqt9vr9NOQ}?A|%+)(7X+7Q_ z_*9FpYxbT7nQJtdQFQvC`FfQ2x|P)UA>fE0f5NOy%SV8`+K?!OC83!mmFYJg=AV!V z()#idbBW)8Uw^1KjB*0Jdtz7P_OyWk)SS)vv^y38hXw8$TX(Q67ljvUqdZB(2SIjl zNLj*vyYm_@EbZ1-Afls!@P6=7aA;u;IE~&`9xrFTV50lEMYrn~`-PUq1tsx&Uwi@FkV%G88kAgg4Xm*EsO z19AvO(`Z?u%M7sqv{xE-s#0}Qm~c|99s%(hs4SW~krA$pN>y*lc}f#jh5Kuf_fdyv{ZS z%;Od%(a9*&;&w0$12dKf^%h?UuK`|JhJCfEoJVjrp6Q!QePE4>1?#<;g{&3HRav2b zPt<>Ynh_KeP**qV1E!aWgK8Hp*#p)=<)h9MLk7Y2?BUhf0V!jvK27veZr?4WOXY`9n^Q8OM#kw-4Q-gxAJ@jTvd z_e~twR-mPUZ6Q zQSlM?b3%}UHcAtVE=OR3mvhgjjXAk2<2ay5f$gHNz?Xrc>C_|2PH@M99wk45y6)iP z?UtsOj&POZy)yN$&}BW6*fYdwsu$MDq?b4bV2bMbF_cyEadr$(GLIUhLE4R)$xt5C zHj7DJa~(NvA#^L(ak6M;nerun;UbE&{qnhX6F+Rr1FRPCG|?3y3G>5g<7w*D0)LWc z@mfQiY$m{j;FJ&m)ONfQb{pkQ=`g-=36jeZM*{RYaLHpyVKI$k91LJd7-f}83a_<_ z_N9q6-VA=hs%cWuGp1XPhvQlor#%NZHJn5%sw7+4R_F~(*K3=`02(~;-0TC5faS!=HN&8g= z33<>9{9Ljwbr4y618jDGxTt+*X-UrxnF1T`vB(R8yXG}U8}%V9>4jaxsG-2sE8z8f zUna#4o1wo7yXu^++t}#elFa2|X2@U(N)ax@c@8e+9 
z8|i$IK;)JM`l7-QTCRntVON?wb)#1re1>^p+aP$~fcyvu>k9T;nW})!JU~`;iGrhd z&>yn1aP=a~%K>wLoX|t-LNvqx503E33~zg1-H^)3sNE$xWuJ48T*Su>ucNe>+N#kM zc&)p|&pBH^q9-Ek{2-hPpwti1O-!7LOetpH9|2&*L%H8!F|KP9NlPwVFXBo^rTYg! zzJN7o%a{b{zzux**#}PhsX%K24=^HGH4b=y3=Dfw-2lvg@u}WNj&ej1Bd#^kbB|bA zw!@lZ1ak%NNprf9#N^dYt|fkYBk$N{d@qRcfG`sjr5Dgiem$Vn>}QG;)ldArzf4Rh zZ6qgsi$&PI^-meT@?*n;^IX;{9v~N#vPld*b@DjZS!UmjfH;4Onf(#U3_Ct-s-PK3 zr6}N-=fDbopIR6R!oE^8dF_0fuV~s8q-eD%s-C6xFfDw3_*EYn0Kr8eNMb^VkSKtA zAh1y)(UtE2)nBIZTvw?!U7v&9fUt`(!vYO~G$8*xQfgHSetXH;+~bkFbma&)aw!9-}-RKBzuXyofmIdVq#$468SdFylP|1w+6$$l>o03@T^x!(NCY7;a+| z(eU`)W^>)ZxKhiD?Y{>InUMGNu(W4HHVWBR89U%BFiFk0`lGC8 zAG3&m)(z8TtXBa#-`RadJ%*FkYH-g73Y44gE?F(^uN^Fi`IvPFgC|veSc@&ID_R~N zI(?c)h(38Q65nBkjBkQ{n8$SnLmTl#m>@z%lX&XoY5|EV=^@qdc?bZ2kBCgiFdf9K zf`kvpNquERK6+D1yau+J_4Z*!eBk5)^G_vzfA(7hs0tR75HaI0iAD<@p?Tg01Pw(%PG zVYlY#!)8SG7hPF2zAB4RBal_s!D}UEgTb4zUqCCB5pr-z6Wc6uH}o0Ea^DMl@!1G} zBAce6gkwCB-wO)FgXsVoKp zCY=7jDjx#{!G6{gKAzVDu}T`*Se%75VOhP++aUv+;|u?Yq8!DD+p4VYcKV6 zUJH6e1LD5GK3d9oc$LmW(SgN&&4(ob0@Ps|VG!|NjY}1uY+d6-IRVq#p`2J61o7Z0 zlRMVe^6)$rUX3t0`3iD;Kf-Okj;EpMqtXnfipBS-ivvn%?^9F^Z z&&V4L6PIFIig*xwJS*%ukknpRvA&ZM$C=lEruAX$qu#c_@z=J%B~IBJbaAR z?ZMavnsqFMWD8rU@p68peL^=-<#CMRa2OJtPCwaj<@rjTmlF4XR=diga%iNLIT%<8 zOWY_SNiBs|-sZi@#f}k}xily2=lQ|{>S#UWK}to;37|)yK*EtOy?LRJT-?mHa{Ws6 z?M&+)g_(I^401LeD6{TLRYi&Oo|}}V>DEI0$Pl{`_Pmozv=`HqorXeWH4S~HSGb55 z&+&|eY%#JBAk$fYdsv^cpvvL%%mqk)HZ{R>E8wBJl@Sw8@Y9Bfe0!2iq+PN~c17#K zth*77;V1IgzUv1^`kb;Ze7NeWLK1s>!5(!4Pl(iHtV)8>>S&tiI4U2SJ!d@A(Kcw^ zYsK&5moRF=iW&d`70~_abl23;V+8>6L)tYmEW7WavmTd!W>bD#jL7`~!tGDw@WI30 zp|O0CQwm#SiFvK#LTS-Q)EnQ{DTbXgc>6VF3D@jU&a4BMF`&VX$tW-P?XHuXt6{ zkJRm$PJ;P=GipfBA4r|v;qp`Xde39f($k2D5q9Fi2y{GKD3>Gf-MsDNIVO)$9}a-S zKDkXKkjZn`5uwMyzPUx^O=w;d-Dde35F|>5=bpr2h}!u@xko8h)GO#wAHIBXTf49& zt!~6U47vDjgSm$$L~nZ~KPp9w`(&Y-Xhi>Xa(3#NosQ*2? 
ze!q|O>t34qKW5kU?QiV59SA=8hh4|MZ%)?t!*-gJRT?k-Vb@dZzhc*o&wt6T7tMcW z*YA;`DWC1V>YDxE?7H^+JG;Kg42=9{*OBCJ?7Ey&Jn)BIzX!*l=ruIKkRz;wc3kc?7Evz@^5zi^*s{h-XE?>ly?$~ zzu9#^{5R}+>+k*(c~%#^?nr!>g*z>k|*6&IY^;JKF5yY9OL_uDJbh z>(prjZ4^MwY6vfo8KQREhEGl~CqJ?W@5M=fStgQa!-5cTWn?|4riQEVI!!*SHIs*V zDvn=>t)bAn8W_)^FTqF6NB1syrjN!-zEmk|B&F)R5G4VagUdwaXvCN~?g>znTujMp zZW+K_O$Co0Pa!;3_3aA&j04vmPe$HaO;Ve|=R;MEzsO$3GmuMr{!445>fypOTvd^O z!gNZlWRz37u~Z~0B?1xOpd7eiBbOq{6HEx7B(L~QopJ+N_lt{&n+i56;RPtE5-$yF zF?a({pC~`Sb$?+&q3Ncjs)eWrr_jCx(>IArG-7#O6KQBS+2|j0v+u2i6Q9=tD}q5` z7^va&(Au%A%|>@LP}`;FI`Gm17Sr?RWT_DC5|YDEHJ+f{-~eR>f-G9*(!P&W2h%|x2ot1vYS6Tc^PY%Yc%#XX zFA^mMT4YB9^Oa8Ki-e<97vORNK!}~IcHN?RySnl`q&N!f18~h^sLU9I79IjSC3+5K zb~Z^+@%Y_fEA+(o;%*m9+pL9U~haUwmG_N>i>VDYqxFv_HkgPHshTXyAT$ z8al9-pyifLN8J{?Wz0P(Q$pdzgMj!Xbjcyv>O<8hVrkKki2@bsKKC9b0yXNa_vx$@ z;(I(90S8fcCG*@1-vM_{Oa)`hFi6lvi*@n=fV`5@Vg0m{%6xou)946)yIf8}NS=Dd z8)S8PvAMofK+xo{CJfB#BD@K`s&*(X?2A47FrmDSvN1^CpGV$T4HTJzqY%(Vb(vJ` zrNZ+o{es-Z<8Z#jb??-nI7?jYc#$4dS1;y8_vbrII_&c#JJjneJ{cC+K=LknZ5RHC z4Pfm!PttH=RQyHI-IsWO>8<{WH<4UN6WwB*I-9_E8v6jAS$xqRe9}9X;HlwvWzPnh zD@}mAEHj7!`{V`;-(5Ml>GO=mKsrS>n(Y9op%^#z>i+iL`!M?EvKiM>GMo>keG4Q7>*QmSHuvMfOwz^g%LEz&aUo(dOZLidJePmO@ix%c$7d^*}?Gf<@s(^?p0s_MYNx!NGG^ zA!!$<&2ozgOhJ}^#gXD+h7JxE$g3Kz33E{*N5|G{u}B$~8#`}mu_HZUC-&9cGr)2p zBq0u})KLY0t*8Yam_h60J?$kvhYvqS9DS)WqQO!FkrkC~tkI$9N7B-_+fHzYX?>Pk zDO|q;hZj{#BQqa*gO4(=BJ&wxym^sXxIJI$N3ONS%#;(ycJgt9G1&y4-{9A#;?=ImJm8 z<+UIjnUEev)YuPS&V^%j<e0q(j)o4R)N|!unwkpI)%DPOrndi;U?}?s=`0$79v{ zGkk250Dc}p>t$0RQoOF^}A?y#C4ehB=(HjVw+5d7qDe60$8De7cHgAfrOO%UyXx^S180{qTvr zD*5&vcLu}~k?Lrr@|4qf$R%I;l-`kA#^?ZtKzP5H=d}29UYHLQj{PYb$w59{#qb4L zz}%%rybx~sD7M`NHMk;iPnm8psR@^fvGmCfENsSkP6d1Y#SYJpRxnfuG(DRzU}}u} zo@^kzf9sS@YKKdO75zYYV7sECVONi1L-NP!eqLqAApvc5$TC-MvDz-txC{Tn2{i;*)Tba>^Al6kL={-2 z15eThH6llxJOf#uh6vM=+e-9>+J?LY_*A&!s5&aaJWF9V15gE7n zG+(F-EH4p(E`vA}A63hgch}%2d$ga;86SB99X+&!NUS(bO*?$qYUD_8W*!JnOooDb zGp{Qf&FkGIP!xx6ax()YJ6K$)KNF7+MA&dzi_@OtT_Q7x^gAtvr=^!i0AG}!Ww~&8 ze*sA`3KO98;m1KkLw3H~AxJusrG{#j;uLElf!S@aH6x=r&(Xd~#*|noOPjsjYoW|eFE8bHP{Fgs*`Ef}&b5=7JBKjJg_Mp;j?dQ<#5#q`KQ=Q~ z^GC%&lz7H3czB>0gcAP6d$C=|KF)E?7%Cx%JwI%{EDP?x2z4!+j;qLnDD^tjf9ZCL zqof-&Aj@Wx1O}{-o))YRCz;LJ97z(CgdM61PL*F-aQV)`Yc?`ZjNm_QdV~rx5{B>` zs01&^h`JD`^>qnKBQ!a<%kgNt8jXG~A!(H3W^Rm)s71_#v-!20VaRAEEkz@*sV#*F zAZ=`m4g zIDqvU4|W8P=B>iZlE3`L#R*c3hA*f2vc;^Deyk67r=4R%E8A4bM&}E^NJu!e+04X3299sXd8@ zkbHb|;UjAanY!uaA~g91F|)H7w%C*_OmaA0^SSpkC!v=cCa=!&RwPkap*gxTXv=^X zDAQTY&e+5Y%A2_%pTqJzJ zPvgz{0EA~)G0RuY*o1w_;0sJI2=QLO*>TniZ_n>v+3`%G1pnW#_x~( zv&>W7#?()x-2Y(^e=yIXV(z~k1dKMHeUUxspS}ItAYeDsZDL2jaunrgTd`{v8?xS6 zvwTGd(GqpDz&!Vyq8xdq-%o0HCdBk8+gB)qlBJL?xL>Tdv_`{4?b<}`y2~$8Mi*~Rv}z3qS-Ob?nW73v zV~r;-=#OWab1c*z?mZG8uexDJzkuMPx!DT$<7{@#bVV&OV1=c2s$ip|z`Eb8d3e7b z)`bOM2u>|9fA2Ej*hY^Ek773BN$_roll11aBw&Gl5Iq ztH6^SrWGWn$3sBd7b(T9p81orBwi6<84bi5~)|V0eC5Q^_h*qjX*8St zIu_p0n<$ZDcv(dB)-}CvtMhtlMjf!G4b6&n#ewbt_|tg9EhKW}NP*VY$7yCi!i!MFk2AWMC7>K< zsC8RDEP5%>^HBubtG7FWa%d;G%2{}nh{@ZVz<(+*?gZizM|9cx60G~#%S>#lB&vRU zAtQ*Eozj#5F(C<2d!{mD#aJ~|xgz{Hp9$~?6fjj9mZzmhz)gc(y^Bw7rK?5@Ww&&A zf3Bg`LQmmK6r|A*1VL5WWDg!s*IG()?NZ*rw2fj4_LY8+vrBAsIxG=5EaR=tb<%b* zfHc#~YWjg*N@AsIp@K%*T)KPY{9O+WZaf)-q5vw*nSw`|I(FYy>O}(mdM8LW_fIbJ z#zcI(1#!ew`>@&v!(+3Al;~-PoxZ#Df1!HyK31@{Op@0h3D;w$;tdwe`E-BwJSnzK zU|8hY$%=wkrDWr-;}D~Y- zK=6WFjs7XS=lvkyt1Lvz^2=dr0vhlkoRYDIjPWR8$LIR z{OlI~8j|NlC!l){&k4$3$F-*1{)pb{@>Y>Dr}JZ;k_yM8)`}kcR#Eu)f+do%QO(#| zM;D;kxLWj6_0iho^KP{gb_0J>}s@;I_OIfR3jfK9y-Vt;4=i5fl2Eb>Rap+CSe;ax8L$8nYy=kh; zRvFuCnh56`#%rtDCT#q%yPu@wVYQP?5k@}`S6nSgY|$a1giiHfXvJyOUiAIqve?_I 
zF8;_4vFge-1k^dS;tW`D*bI@}4G)K{;gOEwaS<2zIi)H*JL)i@v6?jXhK?1tz?1(h zz(m6tV)8N9YkN&vf8&fZi2#TUOw%2RE4wC22ll|zWEx*(N%)~1tOA|4FPg>P zQtBblGu++LYt!*f=^rJL>(gkTZuipctC&%1f3S~=lY?4XGXn3Ze8DP6ttolemc z#`7K}>BAlaO+`MWzjDv&m1}*ILOT-9{ej2C;hDPm0C8fFssmD}s(e27?m;WD#xNU& z`cS+)qf|iC+|_4c^fh1GmTfCdl*)X|r>fGsgqdS0hj$#x3j<1v`t@)Ny;CTe@r7M>LkMk2oyKKE7fxKDSjq^ zVqp+wKQ%_Rk0-U0nJyrT|H+ioG|>n9AxQROap+>}M@hq=RRBQm32nYioiok~XrfPo zR8m>Qi~$~oBycBmL3`{8uIIEtw4C;|b@%5%HrroRr3g|jA$pEevGB2XO{6ZFw&w_u zAw;X`f8Gj;l^6RDwQHHVVYY0WyNJRBKRkAdDVn00+6Q8Se+CIYW8*AIoAhqw#8`0#c?kF`4K|?Q zjcJ!1Uv=6zE)@QVzc2)#<+F~(Oh2yg)P zn+1ap|NZ~4cQ4(l^7x|gKadSjP_gkge-a3HkZ=`yiV`l~Q3HJXGXl;z*LrTdPVK$N zdm2?^JT-L@AqgRb0RP{#+CNUs=Aut^b#=MTJU32~*>uv(d*@FZa7;cI6HwxlVF{gJiax?H3chIBQz5Szeab?zzvNwMgghSMtubs|)|Mmv?)k zK3DN#zU~jRcc?!5pnh&IxBB=HWdnbI1-JX+sz3G5Sa2az70SC_IrkBbAnc=ui)#hY znrNrY1~>|%K%6L;UqgafrSK8BvbuHbvbou`F|(2n<$(i3P(A^kRU`!dU7?Yx7LpM7 zd%C~}{8V+tlzfvn>K02%}j<#(#Ib}8PB)K+>1Ifh(86hInz3J^4~ z&O=5;Wt#^sNu*)`Iq1kP3uQ&|s0a;& zI%5irnWpAv9pg>~C zfJTO?9`!JgT3v~cHvMxQMoJ68i?{mJzZKY3V@iH1OTT>HiPNtK_bE|E{?ZVX!1mYj^CvcUh?!kHnlzJeAjR0+QVr-1>{w69^3N|Xq zpsF~;{gra2zfjJstx#}r<@9V)ba54`fYVh@s;g@NNwGk7OE!w*QUhd?46weE zTnF9*%M5OAsQQ2S3VOQWcKPZ?91WCCS=0#1M3w9CGFcI%Jb|U_JS`!f2QI=cpvdOZ zOamlb-57n-8g*JUPk0aZ02?x-1~R*H8({PXMBTQ$2=`cu*?DfyO=p0kY+R9jQ%Wrc zLq9xnd#sG-v3+NO+FoU_%8&vLg*zGnY>@*U{$cQs)eEY1|K%p)?@IEMyjdP6WrkD*4oRfO=6G+3EDF zUt~Pc=X~(ZUulpU$}i`V%P>|db{7{L2(?#Nshp`UKX*luZtTj*4PWX#;vBU$BQACn znK&LIk!*j+sIZ{8O$|WQGHUDyJUwa*={Uc6D8x}zfLw>W%M)M(pg2X*HTKn_kR6|v z1|$!KU_}Oo1@LK~G_T;y341P|b65ZikH=J(S&zoZ;jV-kP$M?Gtn>d1D!A|rodY-80@BR@@Xg)*ZAsYOKuD3N7?%w+no z>)U%Zj0AQWY{`(~@7Vaz-5bm_^JzldQ`hW#)Op1J)W+hU_C#COd=d>PtQk;#84 zu}Z!-dql$3H6S2#4QRGQ)IbU>lnv(PKlNv@-KcW9#8cPzD;2XClE@?bBMYL;D-o417cj*9KqK3>|tP@f0ng(*`Mp1k* z(%EHN=Ew?^GKCnh#RlZgQm7Y=0_s!_h`roXyw}+^BdArC;H?I{#T;kKhqQm4cm(HQ znX+h%0FCWp!y>$bFIsQ7-UMIlC@{NJ(Ap?hSnN-n!4pNE?ney3nXIYd>X7%qTU-H( zlma@rMyh4eM(6~LonAOPp!s5som3-^s^aS(@)5C+mH>ob*TKVo;lLx719#{nnf1fp zZPx#7*?8>8GE8~JRB3QVYlwf@{D6}gMS}oF*4*+<|GcnefFOiTsRMR>fYRCk$`d0T zhsR^$PebmiOx76tj247L%uUIv57ExDeJa1_kgG95e!5{ic1v|bmRw2N)v5N?w}ZkT8Kg%E!}f__|QKDQO;Yd#9vzo#qQoc7#HU){+*nt1gP-W}*HpN7lw zsbK)zJ_^yNzl3e14A-Eo6RLjt;euV-+~{7$lB-aww^S)nAL|}q(N<`)zj%1fp42*^ zC{pNE=jv5CSJD|*3zGH z*c=jYz}gczxD%Bpi(afyFuX7}(#XFOuVdfO+LZjw&(C1+%Q=IQ@#8${FW2+w)CH`b z5I<0)Y+d76=L~;t^k`R&O=gd5A6?Jb3=W;_Q)B5(7Ex=NwH#llx{-V5tv)m@SDItU zSNx?l4m$K-X|g+7Wp~Oi`DETlS5t9%vqNtguybPsmCC%#Zo%oG%ZsDv>>!TPe!h!t z8=t-|FV!aqse<{qsV%vPQ|U@Y8IbGKUTp7sVSNF<>iB=$Y-%Zv?3CHnDYFBchK9p+ zwxmADddi~ES{Td$%GlZXU#>UQqKf@H80X^(5TupaqQX$3sadAeVO&R$Zv-G#eKvnL z&L@Eo7b7b?9{;Sbx{;p_J(_20AuZmVE{qc1~cq~GJ@QphH^>YxAT?rTQ(OR{oX!h z33yCj*NiXX`fHAJ9H)E}xcD-Rxrh0*h|O=5pEiHW=9dJw$E<$OATYk>L`QD8PoY^3 z*Wn2Wjj^6XmX^))8$k8(XI}5Kj98goic48^?}9dEZ8o}Oq|W4Z&%I90us^mEjA_#& zN{yQs-QwAsp8l3b^JilK^=*@6z%7 z?*Leh)cNN+r9kZ5@!^V7!5vBB*=Lyyf3AO}e&(5pgF*O}CW=3N`LJ)_Ct|o``XkKtiZAI-8{YtqJRS0Dh}D}KI=W8zJ-a8e*J`C|*?Acut`2`E z7~lP=f4x3+V1c~^-vIk+kztsNbFLzGQg>buI;x^N-0qqkBB}w9<&!HM*1^^XLEHE` z<#mwU5*^e#jnr9uzCuX(9Qlm`l2h}?@t4cat3UP2=JtN-+o!TxPy`Dcss3ll9m$=*blRIb`g+(I+}Mn7duAFGm@Q*cSyI3Phj zj{NCd$hVqSZ>GYg&LR~`IvZPP?Vp!@--Tsy!EOHkl1diA9>@kTYw)Cs0YVWzsP+a2 zBqvjzNtQ?N z`uE*emOs@uVB_aX-%kOMl*M1i2$v9>S6Wgv`YOct-CkwELg0@^FF$??ALx)?S$N#?>uGF`-LLg%3Oz;%gcG0;^#*`2(#J-_ zsACTR0alDU9>2#F2#hXvDWiXj$M2wEcl->7UC4-Sf4C!ORzL@GN?{C-j5^+2*!t%b zh~w{nsACu_(m)+om4vvt1ef%0sN)O(;`aZ$)Nzc#^xve8Z7{Y&`4i{;@2TSw1;)-O z#gcZ8ZBFVtD6hyOdlpK3*n$Pv@0vgS&M1|R{ca?T%?Y^!`SUxDtGItv&rP{W!-*VM zy;ltv$rjkomOq662W`|1MSB0MIZLCXNd=2>=O}1SlufZooE7 
zP9=5Wi(2Z5Y)jdS?#c~l~`xgDXrLN#@RK%$48stZ>EwWiyBFk z8GA++uREoPATB6jX&LEwB2gzmJyD>^0x^iZQ)ii_<`kAZ5c8` z8JUU7_*@-NVcLIyfzUGyexyJ@#I0t3>J)Sh+1f}?YsQxmmR9p)vj1Y5hrdezQWRGW zU{s_`W#kY>!VaWl_#yXBYMQEGpT_1kk=?DqsAmMjgfVJuGf>dq`;HOHT}S1}c{Hqv zen?NOhp(?t6twVSZ|8Yy8$sG*S}W)#^-4R0B>J1%Ez8XlK-T?Ng!JVn#W4!}6PLBV&JD&g!=wmfMWV{gC5!M;ZT_rZQ9p8hfO* zi-A#>#evp!p3MX+UYtV$&)8YU@)+RMG~im{68wLGYMoK9 z{2_oxrr9`+`%0rg^;5e95*urdB=grYR=3fKowL5u!ZM*Sh|Tf5R2%FvC!IOfSKIpg zHi$s;ukpXA`PWEcmpi)dUKT6QaJ*bu-TYVOc~Mw9uSOY& zQ1w%Xk-)9)ECV1KQ*}0V96Mp=%j{nv%%=(?bJywR}!!YjF zz;YvZV7X!B7TaMA=_{%KV&BOye*DYhhn2-RF|ym}fMoP!Zv=q&rK^7r>t^Sj`2178 zfib=Q%EvOw6s)_Z7~0~%E*p%J{}cBd3^Z)?#k;d{HY40a`vGQFIio@OUo2x1^KlJce{trC-w`spV<{7IjV1o1Ca@S*!+h=+ba{AP$* zyV2_}vUuQskL4FbJfvNIGsHt$`b8EGY4czth<`a|Cf^M4P*&-SAs+Z&B7c#^1OE%Z z8RCHoHoh3*F>QbMMHUZf(MS;gQjbRT#SjnmQj}i|F{>9@|0auBofzbcAs*6}!52e3 zq_w`u;vsF*G%PqLyY94srY#Kwh^w->8FtP}BID73;?w}Aj`Z?p>=^KX%~f@5&I2P@ zoz4_|o%00wiy{Vpric-c%aJO64fBW1ABf_5m=E-ARF8kmF#P6+wkt!BtARCGO zNvqcTCdfvvn3U|GMc62+_!qW#V2Sq#DCJ!Q`9y6%;Ic8HZ#(K1q`*giACgpN^e0;n z#^x|7IZXL}eMdkga{2S!01A=CpYIOviK6{{$G{}W`S~6JOwcduPyeu-m$yXaf4+;r z1>O!IhY^386zKIxb!yp2c@FcPj1aEbD^VSLUcmui&IsKv0@nHWiCXLbYP%TlzixL8 zMjH5wjx_*;Y`rO?RvCqi;rt)zU;=}IiluPMqOYWEzx+bR`rkC^NT51|k-kG5l*NBR z$aa524F3a>YycboMSj$Dq+&zT1o%)(0}Z8|rbOhFO2~$>*=_PnKA@HGt^<3~l@!MM>cZ9p9Jy zySH2w5xE(EJ@U4Zq$nVhWnYc=1q)e-AOtAdwAWfDwiMx1v-GL0h7CkmmF0Q?O|~6E zEEzb+G$3Xy1MrcgZcj0!sfU0eLuf=nEJkZ@`R3k0QlnV3h@r6%c5-lA1u&AhH==>W z`+%Cqz{dduhCK9+SXO@Olhwb$yHz89LYO9~b$;i6bH2iyQb+zGTUba!4;#G8LL9PC zlf@@29?UZPPU%iKY`)cTY?0g^A=XfpXa^);H`CnCE5D6A_Q z?f?f^fS84K%}P^dG){JAX?`y|HMp?(o3iVR4KYiO+UJ))d^E|w+=+%%B3-&rwUy&d5#H` z4(E>|Omr_8@H_Mkw0WcXCH-s1-+Ej?zNC9Jr z)hWvdD7lGEk)=XlgV*D+m^4r=hR$)Zt`>jOL11ue7Y$~As zyqY#VbU;{*$-l&CWS6@#5ubnl4{!Ie?x?aQiu!@nz=LSq2Htxo=wXCMAUr(%{g86z z%2nIcK6UEenT-U+06`ETAp-uf{XFIM_dkB47;2U1<|9;6KTIZ7Q;I@;?mQ6-isk6P z_V3r<=j{A$QN}s{aVLxwHGXqdqr~I`b+CXz(J>wO?0h}JE|Jd z8^QkiUGd3*^}QixyIx_h=i(*mdd&4sJuI5IM$mdn6)u~Vy4cHYdf}gBo1ZoIS6s0D zm$>SRD_*z>0Km-z5EFO+_zBd&%>dw&v;pt{EF>dIP=Oirzq#}0_l|57b5W7BA8>vo zF|@i+l;L^HMz?w#s1$!F5L6!obSFuJAE*=~2Li@in)F0l`FC&%eKlJ1;^6{bywa;?Y_E}E5=)kLETrM<4g~W%)L}`nz z0>5p-i&AvvKWN1^y0}FCs>_Rfqr6GtyblK$239dq8GCU#^oM6EYU{(yb-(z;aQaoJ zta8xJWM!3AF&o2NvDaf#%P#`d`20Uzcbi?*l+$0-lw^M$j3PEb<4zpRj9$t)&aD152-^Wf(C{a}@**ccvYNEfmN$Gm+;&Z;VqkOfSA2N>) zRZ6_r8^+m+?|%Lp0_`^w*-ul^7!;@TR@IjmU-O&SJdS_T7q%Zx^Fwe^+{J78g~SEQ z#TQ+~s`P(t3zR@m+f~03y=>OihW;g03I6+as#J5Whx)ANuR6NguLSd){fpLPtE>NC z1e>O_6vf&;9ZL38OMdfV_1KF-DP(>U?9yi7jl23!KwbQo`Q$rN##yNBJr9eN_rTJkh?GJysQFeH> z2Uhm!@Q1Oa8K0^8{-WK=iKgDt)h7b(uX+1wzcd@Fzt(XPgxBkSck!R%wg2|T-Tf27 z?Z1B^^7hYff6&<|&)6syKd#?8zuK>_Qdham`+T)=mCi_9u(8$Er@-sD7FIWi>MMtyk+J@8~f3FN`DoVR}AHC4U{?FJl>AjN_NFyjULX%Uo8+PJbE8hewoXQdE7tc&x>*VmS6Nv^C?%3zf*sf^q-Wa5B^Y=(|@5XgTGOh_77zV{+-Wc z{>En>KYXUd{1}zRb}X6qRW3m z;;!!|zWA`Mf$$-E)#}4GZmLSH3PG*c=a=^R{7Q>|-_EEybc_1)UH%{Pn8KI9JN{^c zJ6lBj)voxF+Le*xDck1*mX3aLs;5d_1n%;m#)B$DLBIAJy58p}-VXl7eq3B2@o)C? 
zTTb!guah9ox?P-{PI3N+@nyqFVz+<7{_hxCie^LiA|%sEEz`T2Q0RY7WeOYR>Gee# z8dn=CuGDVSB5}_TyL^2nIpJIRA?yM{Ne7-l8 z&v!HUi*p2{;{R(tPx^4IRwsec#je&DyZGS-lZ(FmZbx`AlNWE?{Zq#AyB!22e28La zxj4x3%S6imtj%!phv>B0FVU&Bpjc+#)=#V;U!14c z{TG&A`_;DEf8EcI-&|__`J83%f8Xv#`-}1vnCr@D_s?19zuKU65yt-kEonL1f5uf- z|89NP_FWvRFHCgC2E3Gbd>cya9{s_?v8qbp7ccquSStCPTmC(kQ&oS8P|*I`pHF}J z)XjglKUZ5j@J;&=o!++&zx}WKIqqRXX%Z+cKTPR|tGtLv2Ce_#6$Mr0liv_{2^cYE7ic~T%XfR;WJN1Uy}8o^?Tt{r=RZ^cB?NcSz>5G z0-qm}J#>pQNPh3%hsb|3sy|3Q?qc#NLb-lF{4k>*ruZKyQTfwfoF6*)I?v{Op1#V% zx0=w(e^ahr?@ig3JF)3Cuc<19edx;%r5W+p`Gmy9*LgJh*Y=^)hpZiG{c4Zymwgnj zY46KEey6|e<9GVYK7Oaa@!~b@f8)Q?-}vwJH~u^Qi;4Vll|O$>{_+myuiq-%bq+@tpK0$iU8<@?|9ZXpOn+amzwyO2 z-F)Le)9p9@Gu?gTKhv)-!}*J~{4$)sSj*VO2S1rIKg8vy?d)F?^M{m_zJ2sdVt!c3^!OnzKk?v)XZ#)it51JP z%ujq)eR<12K0A=##N~&fEPjZ~4=wq0emKmFmZb6D#N{V;`XMequ{ra@VSZxCFK_uL z=ZXFhmmgX(KYobI4=qXk<}g3BB=h%ulTKt-of8!%Q(-X_=wH`nuJ5eK zs+Nrrf{i+^lqY|kJF3=ot%{Yrw%RF6FoJ}lf4=il^@q8n{(-riEK2#sU}B#uV8`G2 z%f-5`*U^96p^OC$put8<@Jx*>q#h_j3cxbBmT2SeeC5C6 zH#dK3{%70c7hi_(%jn&|`}ySpcj~$l@{8e%E|yXBuI*m0hZav=`T!im4A-!`44<1|6xJ95AV2r+s|jcKRl+eUCY(K zj?ejNpVxBYXTJIJl-K-+p=^J#werP1N)&Bg_5a~(|AFoN$Mt@W`@iQ}mw(_p|H*&$ zNuPf3;arDb{iwSB|LoV_>%085T{OjWzm~Ho<6*H{)HT0e<1y*Q@ru&5{Ptli zuloIN=k#a$Q#abY7)_V@-(fR!t|KKCvl+_~Mmybkwje!e_W+=Y0D6u$t zt+2O5uZ*){qtWkE$4=;-Hj>&*eV|Pl_ZBau=*B8bONVuH2QHqt2&SRf*Rg+F0K9^_ zSOy%mIpXe*{b5CV1Z-?enBdEWbR*!a#$g1W(x6DwD{F%$VbZ#t(V|s z*Wn`T3J~?MHCBA}ar5z*e&T=W3P#4xqf8-m6KWF*tGC(SOU!(c1eV83Za!%f z7eGqEY_{~&DY@Uz5HvQC>kGN4ABw3$1LNcTJCeMOcU3iIOot#khedzvtXq}&cs3pw zFp?Cadr!jadjg*HUcjrXDPqb_(SY)km~DyK+i>bqt_s>W4wwi8*tAi56?oh$ijzZ{ z>1@8M?xNnEjc0#Ls~H5%2}VlL0EYuq9U47$8EW`MZPr23o3EQlVFXfr`>Kkx1VZjv9l^aEga|pF=Q0jWg zBqU#0bHM42lK2KoxBe`YtltFZ;l-JP4{xImxRBLcKU=1W?xTMsXjomFo0;ulOo-N* zI@ws`jwLq~an_SJKD(I}F!C5d&8%-~#dizx$d|s8owL%(OlcR(QFjeVlXfX7|CUNv z4o@EVs?IQE@Abn_;ls;0^#z@dAG|$7Tw$I_tCHE`q-x*ysVc(B1P@^^v={tjLGtMe z0yDFdvKxhfM4Er~dR=!EP>i_lknZ_H?>7mz@gih)Xz=nqoi@(u;qNYJ5yWT{#%N;* zXq)#!ajkqK27Yq!4T6*+EQY%!K*D_1uQBolW(McC8x#mOa$1EnP=vrqS@e?uG3plB z%KY$!$2+^63O~!YnheT3z{N@RanBmydmuVYHA8e}Qp_5spyB(r z5Hn6)W(zqE>=Yj$;6bs``K)|Lq5F-{(U$8GZ^K^{e`^-rMbpcc{7H*tZ&qE^j!Pa6VF{l!Lsq&Ol6V1e!?95`Q8it}ydYb@ z*3-wN_xhxBTDn`Rs(JgqV*!`%C$BO1(b=>&R2wcoTIYRlk#p=0@1=0B6FKF5fm-Nz zxP*W0fUoGCV7o)=UO(*}lut(09l5n{l~*(tC@Hd5Sq;_8Oyx9IDsc{1i6ym5hvb0m zJE-Dzi3g<+_tB~NS-28tPZ6KI(t&)Z$(bEb=OyzKZ@>swJm?L) zhxU?UFze0UR)c}+&j(Y#_#g<+T{q%c$M9oNXr+8$cw`HTnI{&H!;&k8w~0T9-9LX3 zCVYv}!;^;$)-^0qaN4#ol^v*^RpZ!hLMneI-8$6z7IYP7R3Q91-YH-DP*4}3@F+#k zqL0{BG!GnlNQxSslU>oJ3v}yH?G>^fX$_qF7=%GrJ9HTqX75Bi)wwrumR+FKR6s4Y zH9(o8Qjt~*t!RLPK8?bI=P5h$8f_PN}WE$^MDJY)H$#&3d9dMPM( z_!vf&6V5Cf6V>Z*OE+#{;bENE$#wvanGjbBx}3HIBBVO-9W102jQGCQN6vpBn}es@ z6SjK<{4jF?s7PV853aOsC&|6L*1%)tiA3H4R?c|FIBU&F6@v+VuRW}zRuX{llAP~u zvKptUDN{WiraNDcOAj;`410`^boIUL3D*lHSC%^>b50`G%-OaH zfh~cRN1{e##y7+qbQ%CpK(N1VcmRg(oZjVW;5n`zc+YDQY!NGUg##@07pg;91H>j- zAZgU&*2wP_iwT9cF7<|ktn21LiIitqZZ|cq!lz9qa>Jh%fCNZxS6MfIRE~m-ZsB>z zSoB(hnO$oiseZO`B<_i>-CaW;j=%)Y!t~vGrgApua`-?O$F)QaE)CP`5JaHn=UF&+ z44WFUm5)^OgLIQzI%*#D;2=}b)MvRscgD%o^`slQs zq?1bM)WEz;$rH}{Lw6RkVZ0hSf28-$OtnFvyMca_%ASf3-2xW2qm6BLsU+Imt!Z$QR7^9@E1@KwA4J~o$jC!vf7 zoQG`!TgOF<@5-k=oBD~l)c9!6I2M}>DHysGC!EWy*pm*m<^f@0Y)iV+1btQ%|8Obw zpgD~83K4@K+`*cEt(efH1P}6HOL-J6we!yX-W-$#ceMWAK#s~^mB7)7%fmFZnc`U| zA`>xpy>~f3rC^-N2AsK9lTb3Ze!9cf!=bzfi)D~jAC9{cD2bw$e7a}owPdps+s?<$ zy_50s_SQ7BSD8^A^`fVs1a!}4$DGq}E%v_XLt@W((PBV<-wx}+_qKf$m1~p^!sF<% zIn7An9*Jo#u>6Hu!Wo*F0qhOqM$1%8E~{>pNa7;bxC|UOQoB z*=;K>0b7=KDICRj+UHm~;W<_^cmuqI!XG<*t=>=`+)m6g{emBr<$U)va9YJXWPIrD zw9|P 
zodR2KbFcNjcCrHi0)v=vgNH8>Ha2ic$i13|l{e#mi4+2MF<<2KFlO3bsw5Rvts9j% z8Gsxy)TkzfrV6H%Oul@1u%ew*!$Nrcx5$znnw>r@@xc#RjEJvjPRO;!bl;!@)~2? z=~5mhLUs}zOHV!3li?8Z%{DD*rXz4`G1coHW~|&Y{YZ&`fX=L38XsVB(!4#BqZOS; z7mLi+$u-gHt%CU@{>GPeS2NYk#>U~;d~R~Q;OouwRYOCUZaKv8w)%UX5rCXXsGHD# zs8yW^1+C5j0MUu)4>_HlkE8N;ZB(Kh({IBm+RI5?FEcj_SB20FW8IwN^WJll+&(9L zY@f1@7}x-9i4KnbYvx~IipwwDd+OeJ+)(0Km}0p=#%R<-4v#k)e2K6or&I|gVWK1> zbMse>1d9spN7{mb6F}c)B}exzn`1+N%X!9HtOv8v5g_(U@rh`bGraC zBZW`fJhvwkcZuXbkzI8PY)2(85oNV;;i&4(G*xD&pmH zRSO7dw>U_m)?d#Kc$KIJ?C6%(c3Fk6q)WS6-}1M0q#-cnx?FY~Zv|9@O)ufS5Xj98 zI*Yh(o=8SKmaGedg&7ES0h^n5!D29Pz`8JtjDD}>7sA`ojnL=qS(bWo-{l+%o?Fr` z3rPx+lwwU?2U=hNq|mE7E+lq;GLkh+IFWFF3%7tMUtp$*Qrp~jf(BF17*HoTap1Io zyc?cxVTUCtK)rmbb}|BXE6AT(oh>iK;qVPaiERPTdp$1G7`ko1<;gMFY`xe}hU*gx zT4)MCQNcC&(98PA%pgx7ctA7o@Yk-I@l~yF%tofkX)HTNu=Pz$U+e3CIRT+9bQF2o znmI|3NYA%0HD`GTSe=W?lTE-<-+w>k+9@2WkxIVU~Fcbv1d zAMOhctt_>^!rQ{OH^sUsJ_)E&F9_RVruURQjKbaDkJH}~ExK)nz~|4kl+shnBy(7$ z$W-!TyB=-Kcj8dLdB0mJ%iv$zoovw=hQk~WeLx+lo6-a9&f)c(ybY#@0!9gAJ(?*?|V%11}rqLWnlIu==gbrlqB1!6{wrpC_e zF~oFcw_p(UiA`-f3w9tzuHIyo(b2@>9q?782#$qirl$clq0P7&sW7G*?_Ap`Vnp>v z{Bp3FGD(q+`?Oj1c$rg(fB4Qt7&QqQn!4e(db+$8rD>4KRCyRiITwxS%Z=Bi4yY+#Hh$v9RK?u}II(ZTgF zH?;=)k{w=sNp8k@7QuP;jw{1vD+A*=irhMm5_#V0s^x1S*JjL79mdx3WWgDy>j?u= zyPjHqR<~$gn%V??_fP@8qcAql00FB!YAOS3svPqwhW9F$_o~+K*Vh|(kHuT1I*V3> z7}+1>eP~-axq$T~tz0BRy8gM#D_xFam4`^3aVsA@&6?A5-NG8Ho9kPvQLZ>?DeVPs z8Z(CoI)y2HXLS@i;WHBJ`w&X=;zar@*Q6VNnK&DlIuTDi#YYpJw-+C3*sZA!14*zg zifF*ja;6`P@c;RrQ^;BRdtB%8f&ViKS%d$ikeA5FyB`V}{7{AEH6W*2O9Hvk4~5Jd z#lNDE_32+y$eH)=Ddhfoz|Tb93vcD*4~49f|B^yJ&_5_-Q7e?>heBqV&x@b!J7uGP zVGVFy&jM`=OBM?N17M+pzM7Lgr2DPYU^wkI@f>9JHUzvDc;uZ!6at z^oK&G?dhLT$fREcmTb19Fr{K%7Zmxo>X$Sk@lU3XRWem0a6%`bV7J!?1jgE zP&y$ZB8Lg~>-tMnI=Kke%*8 zqGSr>qLj>HrMz#)(N9fyynAj?o2o+QXuj^f4;fgBhcM0ND=8!5iXq;4j9^somfJk_ zt~usqf!5;=yjooeZmqLf0|h>iHbRK^p?kG`>q#b}PFcxLEPC&y#7bCyJ9?8+;qzkS zATDc@uLLVN^4jy^KaakVZLl07m(^z9mu2-iaVPNVOZ8M8GS#wMXgQ4)Mo^L&-p&}s;-oItG!Vbz=)F*=Es!=|Szcz21^m>49-%;-)$WLJ z8wE8uIZEpej)ta3jYI2yY|xsaKl+(`&?r2QS%5{g?(6D?W_{AAi1&!mxpC7Pj6>`d zRRDr4$z+@?-DT;F_V0YNxP6hj`-n}g5cKyuddtPPH<;{Gc)(ar!IjNBSoR!RTLmV( z=sH4}J1lCJ(4#EH&b_#xIJd|&>(*G;gIg#ux)RQh9)^XFbgbWhlwJo<1$Yepz2*nF zr@}cxi+tZb)N$SP(QkO)=t(L^rU=Adm9 zPEtIR5L|Aq9F7$ncIg|GgEiLPUBsMr$W9BGJHWb~6(kz%Cja#7G+iYq$P@HQuemmK{xz+?!`t+3_>>Jd&ZmhR=?uEs_0!J#HsR5g@8D zURo7kS~MgIB>NDk>mv;j_c5;{%j?`x6A}H!JFu}IQXX=DL)D|=7%bpY`LvY)uN;9> zhgw#1H@F6%^Mk-nRKLXl@KV#{W(xU10QI^9#)~{0F0F~kHAXC5=T3i^hO|p#`R>@R zc^KBa4efbDpQ~9LDm}?ec^i{f3w7v4K%v~n+8%YMQKY97AG8^&c*T|ebSs5M(piNv z&&3#+`%T1u>xUnMqYCjPUAk9}A;LkxX@y5?PKnV&V)JS!7fRP5a zrIc8%FlpsrIu;5X0|C@K@<8RML_(+0B|Uv#4ZC0tC^h?7jXc6r3Vpy(S79|z443t& z-b@{Pv1^8UaU%iI@ubOs374XD7NzS_pO^=$NDJ|Q@-wq-aVKFdnpyysxDl3M&W==# z^sf4ce8VVD3OTLCW<###5r(99X9sP<6Sx!*JjyE@ewT$;9PYG~BsjobiC(E*OVR-6 zkjSE5i~h_|mVYXAm&21EKAZAz=Qm91O81By7!;)FxrlHQDI=7-n(FY2VN)Jaw}j5> z#jr7d6qua2`p5(_c74tZF`D&JMkhn+2J!*?dipG=WtbA{qMK+K%uP_^#Fp$O9ej{d z&6RGW=qQ^!bMZZ%xAPr!M~GN5e#1f{$&EbWi+;~e$elM?BG4r(0*Rv5q&-vvfc(dU zAy$ZcfNhy6e7pA6^k8_n@$DP@dME>8*=R3fHeTeG zLcL|MW?a=k?XdN6y1`5fbe~!LQx+c)p~vN%okCNieRj#FHBGX$}MOogHT*9KqL^0TgkBR8g|wY%j}y zT@*;*fVoGqmhIxCPZ)vN2Z7uQ(Bbz;ZT(w)fMsx2&(FE7{k092TOcZ===x-lBx&N?N3F$Eml zY*c5k+SFwpL?Sb{D4n6$$gE#Nr=tj9yfg|o#uJ~)QeXeE z-o-v%>=IQf(pWDj4@?ICrtP3pmFf&Fbn0}-gC2ob@OYQk){N^do<4Q)b`B*pUum)E z8rttTbh^Q!?$;rtYuCM}MRe9Jk%tMum zbvsmyO7*}Y6zMAI$@?$ zMjD%&jii6WC{!7J&*qeu%bQZZhz6^)GB2mVL|*x1m#kI8VpqNt4OMKV--WwEYue*+ zQ*JnhJzgO=rh$hlD{taKE$l!`FeB-B>%7_#nnmoQriVP)K>5nWQGs7xSaXym5kiK9 z>1%9zwJw`8)geVV0rrG{yRgWQVtl@*Q8~N!8;)_m@JuZ4X>bG?pd$t@!EJnSmBCOj 
[GIT binary patch data: base85-encoded delta (delta 911840) — binary hunk not reproducible in extracted text]
z;FWh@FQJNmTSrv5xVfq#VeKT}tF6L(;-L?#L?9*i^AP_rs^Q;;!^yNnxv!cLSv0dW zLH$)*I#CmcWGJnKKBQ{_+iyCrVn#|Hs+q+s;jo;xw0>y)`t7R zKp=&;C=-`$-D|AkSSb6Qzhx)u)Mnvc^H*Ipr8woUa-j=+{7mfibn; z24qjx+C)>yji^1n(r8|N^S#~$DrxfcGmR6GKZmsiB<5KHwL*;6qqtP!4899P!*)}D zvKdqtK|UP=B{CSaE#7{ru$7iNH^ldij(0WrJ6E|i29(Y;8{iBqe)OnDUL-&F1Y&_6 zT7P|it?eBd^1qU<7NN$hp4&3bM%20jkS1W4cbj=o}4z8;sZ_31_`+ViG-#M z=}IzoYG#H3zzfg(RxT9&p@~8UN2gjsDmnm4(^2``5QQ6i`FOPpoz7aKCteglRcs)N zQm~(sDr%dG&UQmkay!&#^_cu}awZeYcI82T>vHf4qMI`e+5?ZqEYfvgMSQ0HA`vHM zrm*~zXexkn_D{!2eVb%jw4h0B* zCmvM-jD(Q1$m93>Y~@0`(oq=>d4VWAbjGVVQ8)IK>EBV6!sZ@tyC55#gIKx9<&cBdil>W8B=mIjfYWDD(B+)M2TFxh)%J(Wk_;>smakKPOSCmWufpS% zfr>13a11}+)^v`5wavLrgn?2+Oh@YdJ?|Brb<2A42uoh|iQ`r~vF~EJfVTyARu3o~ zijs0fJbcd1!tmTLTY@Oj?2&$yd<=BL0a>~)uhax%j{ zVxd1k5>c++E>G5fsvyIE=%zb}5C;8q@F_iTp-Z?k3ei>*NgdOn*_D=ffht@1<7>0a z^I|5D{}4E&W)zlMiJtf|o_JBoj)AcOh&h%5Ay1EONi=|0@@yXFwNLGT9WdMn8c))4eaZ#L zr2CT1IHr>`Pe497&a6LhEm|Rtw0Ym?aU3`g?C}w<`#EJc<^Nu~*TH71hv_Gfb=H24 z`$dgW_Pbjh)~p%ZMS|Cu@1GtyHf*rFj!+@jq9VZUI7cNgyl4YX`PEZPg>5zl+K@mj5<}}* z8o3e{K*kgvEnUC|Se|`RFju0H=inxOw7C&4`f@aC>@M|Mff+4q zmza9srujsF_Q~IoIZ^vYo)l@YM@PkHXra1k>92l*j=Uh>D6RVV#+^H(l8Y=h2G}V( z*fmbck#kzqc$CAOmNTP(G7;n1BtIH=dDz=EIhKdtpx}dNkua<)_ofkZS!B?1{N%W_ z#>AvTxs{DnTS6rYg9ecG|9Vh~pTv|$zu?3U3U#!9;X?~p-itlUyxYEJk|hbEplX~a z`2#hqcZf&EnpW_(qHktF&|k`-{P+M0xOnusC<2YW<+X&m-ssJzpnJ9#eaVxZ+n(O; z5I#eH6FFqZ3~5jcr2?nxIS5DBt_yBdtc`Ys?c^x|Eknsep0%+&(O}<|wd1l$Ug^}C zd3zauLfnZ|;Gat6nI9s$_+?{g$)j#;{?dR3!Q*4nigZEBVqpH#`AjEO`|sM&Q0FB>&EOG9a3WBMR_q5wPh{vIJ)Ic zql?^Ht%rmuZJ6+wA3qL}b6wHi*(EL7{U0rVfqm?D#m{`7bZB zSzo*PGx$_Y!mX|*psfjhSSTHBk)0Flv$lfh@8{xx3$6jOxTbH(*%TPTqKf`L8!j$? zP;)5ANagaA?4g`%7MPJ7o5oI7*bQ6tjAuP>YB61m6*pBew#~3o(Wz(*6%*;ujKl*Y zDLoqPZhZv{;zlqAJKf?Ta@VLt>m`-gYdTS!D_&7+EX*uqH9sDvB z2(uD%ATHLigG3OWM80D}Pw6?%Z;UzWnhC6mvdccy45@OhA)Z^L0yzLgV0;a$yd7W! 
z0Bgbx7hI#Nkw|@yQa|170mSrz(CYiheQpSy>Ln6b_?gEd1o73Y>Umkn4(F#l@YqBW zu|}Gpv^;cn)F)8`{!yA%#Bins z`;IGtC_-8YPyy%aM*Y~Ih3y2B3thv zZB;+{MP6AX=aD4yl2b@v+_Wr6Y3QYf-;B+|Wi-9!TgK=l5 z2jnD~?4=2OQd&=nkdDLy^@)Uq3kvgHQrdA9?)hDbDVSd5?&9zeWG3gYw6_ChujEAj z){o+K?VS+D7r)c+Q$C9mNllx*=K;rmYD27gs}Nf9l&#ZbiU@otxnS@Tm5fxJa} z+<>1$mb390S?YbCo#kbuQeFR zac5oWGi~GW&>E#SH*9;#4ie5~Lg`dK_Jb&9%B%&r#s`-Bw`6!=z0KA`5*$Twaymsl z>E<|F@s^TbLm)qFBiC8URsZZV2g{TG>^DzeHZwsrmd?I^dxasKU+0^l_vtVQtRiQ( zm?6M|a=IZ@rBaXN7Bw9psxzq4Ek%zCyObhne*`# z-jePk#7R|ugZN?NKzR5eop6n;`r(@`*z~!A&$zaTKaA4qbW^Q_{>PH}jM7~?Q*v)n~E@(H6)y6YTu znE2m7`j|JC1{ASxbRhJ`5>B|@SAuJ?&T}wJzU}OP)D7`>L1W}`OAyEbRqeW5vAaod zABj)9nM)m1=`KH}@HHmxxK73!%xkq-lSIC!wulb4`MktY^S#1G0vjQnQmslsa%BWt z6JEj9XErmUc6omVY>sfF`vGJa)xsv!`n!E3TvTLKDG`}d<;_;^SP0>}xY)#9fLt7$ z2t!AI64uK&lZxd^Iz+B@JsVSEk3z1Zkw0ce>b&pMXe^aV zhN%MR31%tw$xfhL{dvE&dOm?`>jtjWa4~^@1r!qpHHuCpLaF9gH+DoKNW8c>p)gOe zOfK&IS5GRK&mEmS#*~ZeSXtk(=s{^g-ND2{G2DVcX2cjgSswX^n$HUX~3{C*UF zY_T~=5+A;T^Q9V)_OxX}npGD<(pSZO{}p;`12_OGpD{|e&hjC`+Oc5;NdFN98laX( z^bZVw){C$AgvIDps|I6nA##=Awa_f}wiS|a?~B%BhV0NWpiT0+v(ed0 zRj%Jg@$K)H#3f41QDCQZFndLJF#Bg-TK0IJne~LdHeJldwVS=y8(|xE7gZt-K^rCv{Rlu3Kkgp10mgkSRYLz`Z5N;NE-p=HC0` zdiBBkWseL9f=nG#8U&dl55ER~8*4M+ve20}nuKjvh8RCS`vnl3)CfwGqx9K{D86?s zvpph)1N6GfitMEi9yyL&&n@cML6e!ttiwg%3!}nRjBoU*-R7b~6KLbe{J40~HxUuH zu!L8?PacB)5>}G>N02IiY^7&aAITXa z^NM0WzP;xdp<{qmQHrrzC34^)TrA!vbFJhAkDT$pzz3BRnxu62&}WbAB2^(FEL`U0 z&P3}8v^YGboP~JeEWJLcJ8XYSOa;l zkg%#|%V98%H>||gVIg(-5rDG3$e*EP=F4kdR}vzE5`ex z{x-liLXx#ZfgR6xHbV7C`CaxIzt@(^M(;qd}A{FhexdtTR|1<)$7f zgR3e}vpoAYFJ^>5eALP*d;o~aTN8EZtFkc_X_NoJ|+Sn2{ZS78VVOq=i##0?7NNSf2-}=54P+ zvaJy1tQhB6H(^g+|BVUi@+Tn@P?urKQd;*cRbhflK+wZ3Cmu?yenCahq9H zWgf4vY3UDUOn~P|y0#*QCF{>#)Vt$m`i@%(#BP4~0 zhska!YuW7&9Ur*cp^N=Z{X!GC;I6=b;<;RW>Qf4Xv6flnKz)>QY`?b*kD{{JC;Il^ zO!1*J5wa<<42E{kGK`<;TkU}d_^PObg%A2t=x4KrUN!JXr-$G{$<61 zcSEcTaF%H8Y(%**v?Niq`_v)J4s7&FptBvkNICU1yXWV<6bx?#?MiQDq_tM?oVa0p zV2r~9JYQul!f{LTMQpLPNq4?~k^C~CzMka7!11mv2Vk5mWMx#i)BDAgO=*%bBN0Un z7BrCxEu{^)Y|kv2XnR)>h!ru$$G(r#B$AIe!xQLg7#B+U@IBEWZM~no#xjrjO+2G zDIRKwpOTFubW(x3-2@4L&=14-=oA;uYNB9F*he7$>us#B%h@1vk|Cm=2kK=Q$#`!w zk%KN22zDF7n1=%I%ToNwp~rr~PC3RRuf#c4)*sM+ZF~GGL6=LA7q0D@ znZA!o6`1^LXa4-{f^pw!b8I||${C1++c^c&K!;h{j`4?BlOYb0*wV>IJhOqR-)kc& z=Hyd#9~ysCQVsB_fY%DDx1IN>!5uLfh8FsP55x}^p5F`HV{SZOQZLYGymC*F7 zn4uf9;!to11+|NR){M#V=vh$Dihd;E%7zP!H;{i7`|RaKVuW!d0h_DvPgJ#wB9+zd z`z;(z)0r~rXUR6kTC-??DeRVj?}@<2dQahP!|?g1{j*WYo(>Q91Q6twJZvE_L7;{rGvMBxX`>U|~dQb+s&h>?cDKP;7UmsinYh(?FeAUCFy zJH)P7V88N39?PTAbO*&lh2MI5Vw9{Pe;nK!{R`VKRUi1Am9l&_ z$Zj|Ce2>%#xpW-mX^>xY4+#5ZkfJ(k@nnpOvlR8P!XY&C)U~|knE(h zab|J5f%4qNnkgX_pa1>qSw2X+`mdDlJXRfZw@}PqNt+(}0w0j=mM20;m0-&wv`03A z?(=*qAzOSZfA|d_fp=;-CL1%`q^Z1h+V)fDT=)Q}#?u!R&+rUoC z?$HpHeIgVf-Veg`z4}3&&+(*AAcn~^w&SHUYls{%-#Xu zmw!h!G+5R{1S`Y-uPTWjLY7zUo5Iid6i+Ww3u-50uL{)m;$ro8o3$5c=`<*1`ed@F5hPyNUqJIFf0#1|co=~tBD^|nCa2)T&~;yq%vcA29ZAoD8tq#Ga*_D2+4Dechy%eY21OS*%79W}d4S4CaiVqjl0$-y-5ZP3U|D3edg2neozf3b*q?m3!`N)(L9!^N@R&Hsdf!05|nr8Ji~ z+eq8o(RzLvK952=q^f#8+qame1zL@#oV?7ZYH;yZZo)v8>EFG9jbZZ*!@>an__kR?|D&;4Ym;SeE*kiMv4Be@jzX zaJaV4tqNBwqeYD{5*+N3l}JL zhP)fL5$9#ft0fxQ$Ae-B%X+*Pf14#;4dO2F8ew;ySgvSs)NNjso~1>F31?T6Htt?p z`G!uyoL~TB7|z}G6>j}Dsrt}S2p`BJxW=EUg4{8%?3_d)6o3Zvev@u%%Y#ikHqF8c z=<{2>ANXB2KQ{2@Vye;EqNIa%+H z^z@Y>9Y!RzUbL443SMi=&6UB%{0$dL@xRm@i(oj&RXT=4$lA#ZQyQXpL$?yp+ki~# zszkReN%XOr@7mR!iXh92mkc8oiHXtT6xmPI?}n1luJ8NZ$qWYZt0vN}(;9H^cUgC9 zJZxyG0yBwYEh?UDxS2A_e`olQ*q9ihNs+YZeD-E6YSJHsV*I9`!DaH8NV$%N)to2@ zrBGgXn^iyFhUB>y(4;Nfk3J}X7`&Q3!9+x$J8PEJag(XcrrGpD%`14}<5?>u99mVr 
zAa|`hq1&}@S#xoist?n(kt9z;{#>wtM?yXhfM`XB?N@@F)%>YZe?ACUsg1+$V?C>; zdz{cq)*|@K?(||!NQG_kP5Cwu7-c^@Nkad?a&T6etAd2k{|I7KFwPCXI zOG^UoP{>+F9^T=bHh2`DOlAvncf(C{7rK!&`HM`-0Os+J)n&MJp1RYkyXUbwRUA$6 zwHT5sSe)Z8b^!o~e7eVFz3^`#$fAIInEXM5R0=5SuyhA-lV(UC#91m;T&%hA zA-+$~?sh*173oQYUh#Y$Cn?NWjsYk|n-QHC2d#bR^y(Au*AlYScEV8g+b$6z1H$L= zn&PD$r?|PtC)niPB5n{@L^KB?8`=aS{KI?zKMM)|`FZvC7i!N1eL zBNjIW9>ozdfqd9hY+KcpPI20-9Pg+&D1Me9vE-P%CnM;{_|cOi!}d$SBo8;wsQSTo z6yGYe#-7{&e_ehxwbpVN8y`LG9gZpT#m znS74GW=vU$R}w9J?85p{DWg_aHI8_S(duew036mQ3XnBNVv$ESs|uGXHD`-wSdMPt z>SsY|bB_R}5c#vfo;t)xhJ>&?E}7!U$EER9lWApxfAk$lzDxK?$>= zkAk^SDHBPZ;OwMr+34e=_+A?^6J6eDS>_e0Is~SS2yrF*8gg9PD^bSQyzJ0IO zSgqq}e|l0U00CWoEePS!J*$jt2EIt~xX&O?YNShjDeBDjb%lf{v=E;t;habgROGfC z=+l^OF%U?ZGXHIcVVJ+do=E`bejMQGj)#~G0uVgsmfnv?NJZepu5wvfjE}uUA)5No z$TN-jn_y$<#?@B;v#f9`Mytg9|^I=`pcAxBM&T3DeyG+DFj zRShOLcvh4|Sf}xY#!E-i_zl7IWxbaCj0_AJ5lCTZ^$85w+sX{ApQD)(Ia%KvR#87@ zoTkl~K4}w#I4Sb-kicIu&URKmq`qjQ+<$UkVnSG?~>_!T%L>2?%2nxw< z@ig}n$oTHXwwV*qi`vl}zMk9(!kUuyJ@nX6VFW?}_y&+O>_L{wc)Sx=tC2G-)o6!UVpgL^3xG)*wbh!t0eV z0iuOwR3XD|4MvLtq5ZHl%=r_&FIp5U%y*)V$(#cl-NL#9x0ox-(K>`o{(iq!e;5Iv z3Uv6X>|d<+`!jGf{z|nW6GW9cA5+?!q0H;y`Y<}+A+XPu7a**8$`I5oU&1iZG(o+& zGkpaW$!CrG@p%Te=(3#$78`P$a|H#QoKe}36}paHi5~nk{_bPuNZk?grw$w`J&=!k z56P6Y&e>B5Dk5_1s7%KV$XsB1e`?vLB5<+{+}cumeKP4Z8m%DLq$(czT^?&+aP}aw zLZl|0z=>Z9gWQX4BKyPIHKxg0Jgwm=%aF*VAB}fxtXF4kW}(WUvPe!#%DAnnLa)lH zf`aslg_vU!`0%9nP|Ny3)ZWOZake^uUfcW8lb`7RI0C?%fQwG{RMPoufAu1-EwT3I zRW`8Pd_c5a1#DB(lUtSRvNm%f2-6kwast77-`Y!=U9qk<`|#aI*do7c>&_6uF8nuk zF1?B?7ybCuo_XreUubmz0&5Y2(267u+cFGjFsj@2%JV^VU*aHZo_%=B z*UeK6ufVNzIW`jy3@17K%^mrg#J1yY+SfI_rVNA3uoS@N4ntQVrU(_YdAV}mMU*j4 zXFR#UI8>bU!f{YCqcIl~Y8^X>w1o!h{StuPvme|}&^3O2*=SV8Sz zggflACLF9Xa>O|{&In3$@Y}$k7y32v=HQ)bDrRko`N9D7LQAotYrsAigX+{v&T9wb zlY!gv-@fXmOJGvjg=E74s|03C^7 z?kYaanLGF~B=}MXK-Bf34~0R43ut9~SjeCX6Y0 z=(|^v6l1xn+-J=8X)V!v3yXW=q6pLDCdjW|SuNRGeoR%1Ks%Xpt@vV=2sQG_`3CH3 zx|z`;hUjP4VH@qTlXB*QHGB*h&cATLYxZ}`h7Z^7%VBi;Xqr|3-Re00WW8-?A-+Es z)q`-=uHz*?fBE6asr=|eLKE;6#}#AAx?Dd);G-MwlQVsQ!G#e->e>x5eaby{4~P9GifXuo?nfdB;MWA#Rl(xU|@bBEi*T9#O0hKo_eWu|kTv4c*ZPErbJ-nfG z%|IO04r|!VD%jL1`8MaHYI7>dh&V=|OrDh>S`aMH{dNi!u>_aSS=Mgm zBBp-4f2Bll+cIEu)=F0~iTR>7=cJho0DDJ167F?6q#`7w)OcfQa00BgZfq)RdeJ0l z-s*m=wg4Rgp-5yHZ3Jl`&99KvhXVPRat=j2zH4tq*N6qx^Tt&Q2fRuw?D(afedk+1 zRaf{mo7w}skX${THF2ST#zOD5%s zB_0mfyXAzm#b~NCS+~XC?Vg0fIEzM^biCPJW8NGBEssa}4UYj;m6_0DyjmBl$SnGpf9C_#L(HU>GDG`sGppQ9>M=pg!f z+)HxnTkbuDKds5EwZ3f#C)a{Gl-+x!e`hFQvdjE=;7#3?tmWM6$uQCS?kgIX7}Mx# zHJ8`kIBi(gFSnMo5Y0^Yo-^cIZqofRmfAdhGKX>3;m#$zum3Q+1_$le=#m2@Em9zTjw0<-7Zy?P|3HM7?g-g9o{-swOhle zjr9VOqt}&FcYPdf%>hJ6aUa|rRaEnV$W4O1C1tnlC@Z;o>RysIL3W+Y^#i*CJlOSdp@-+%o3z0@`G@BjMGb07bs%zoK_)yw&_80L^4Hh63#;jo`AcGkVPr30 z7>45;hGAfzeDwEA&=mTUtm}V3s=vx$s*WYysgTMa!?4KuhqNCI!(c|zkz|FGZGrxy z-|#2T{WJ6rlyOqz!PCEHf8a2TL~0DfJH$E;Nd8;WNE{f3eeFL-NP)Bg@#?B-d`ctZy|7Ioqslo%leNaC2$Pu$;81^>)`TX*ykXFZz`B2B%Xj!;vt)(k1IB~L>>W&YUEML%l#1%?ot`WOo z1`z~nFXjSFj&@%0+>XP)Jzpaf1#+-!r!1;%8h?#8wG^MSe?H*g6%q2sdi4?Rgd{O( zM7f!998gO)a!Sxsh0HLUkH20f$@AaJ=$3eYkI&SM!4%z#BCp}_R}^!Nhk2|j3@yWX zyz1TsDwG*d!WB+7D4s{UNwG9{vu$qJC}lv`;j%MLfZHN7A%?z{__G6})$b(Tr?D_z zcM<`!&V*x3f4T}H$dLRQ1)LKz3F$D3&UxqtwT+P2$&)wmA`}X(69Mco-!P}^1tr=H zm(eob53as|01)Y(p!V06-LD*vFgxL53yXcshrI<6Wi9GlbobDsRv(T%!edI76Zf}` z)a7nQEk&kt0f58eSO4;1&}tby+>f96Glq_@LrOKQe~tw~EP;#!h=co-^sK_4!MpVz z>;w7$K z9bH3;R2DZL1H3l>5jOeaeT0kQ@R#=8(YQ1YjdaePMx^}&>Q%zg>8}eKwu^4nftp^V zFQt^wf8rn?r~O$Tk)Sb%PxdMwz$EsPuXawOg@EEl$fbU06h&o+CHX!g1g&xG0dM&4 zhWG)`s}~KZ1GO3t5-OXNCjJC-C!j0gSz(kOE;z(NQogY88pV8Fk$%q36h!?K*OCx) 
zhE59>glLskt^Q(z!+2DiKHC%+2#fVB;q7;Zf2PYB*LE8ZlkBSB|DvCyp++6MPDU~x z`cb|=(ohwQ-d7aLk({S=ea8vK>eZcdT#>x0Qsk;Xj#rLn>ad2jWS)waUnY8Hk075Z z@4VoH8jV{Pr;aRgw5X*{v*IspY!T$G(%{aIQZZ-y{5|xJgiusiGjMdcp$&U_26&!d ze=jd?g>*&p^2{qoX4bX*@Cl_MzK)q&8*%j8`W`Yvi1kL@SxFH1!L}?S6Pg+HQfnsw z;=frY$qzWLkQrkv{=|UGdv9vF6Jxm^klmk`abU2QyW*)+|X-C)Ne_DhIs#xnF-$s4&#mJI?#g2NEgYk(nJfY8>a4@6; zmn&BzFpI~PKdvsS4n33m=ls!N)VY=%UC^DOKjv|i@W#5?COS_3@#pL=@!UBF`Y)NO@UasJju z&2S$bmw7QR$cI6S9?sXWe|^zZ=K)BT?wF1@Sh-iX0M=b8ty=xy2Rjl06=$0O_L^s) zA4710InRljUr4I2p6yn!Yi@0X^4!F}0E%x*G>pJ}67`|vnHC>to>oQcVRutz>VK9I zIimhDppUNHluGJ$J)ifi!9hRGM|`>+RE)5v6UPB$q_V6gM3u$(f7?zT5p1+f(w6l$D`Q|GA2YceB3qExMBs;U_QQlCWhJhOvikGX_r;9p(b}DQmp@DX_}vpHJiP)V{&a zXapvBLEkVAQdJ%wy7yh~idkXFeL^Uz%=V(fMe= zBA5HhC5>ONtD8~3d=A08Iawzqu16@JlCDa!6hOPM>%yX58sNyQoATfxz>bnVG#`dk zrMU?VCa~5gPz+=LQY84zwv#y%AqwybBOqScfBlB%#2uaUfyW|I8c7%%p$|w_mYjpy zsq(;TWCDStOepH)CuE5Hw)a)NSSLm&h*Wvy*heylV-Rmg-9T?*K!Rh(PlaJ_a%v#{z%nWm-KF;|b(BAOH5XZegIO+%0jcLO@(ze)pRpgi{;Zl#O=lr3D^YT zH=S>vZSYp_>|;tbC7;i)!s_vD?|PQ*e?K$TQl*HA!kxsuhtj+wFbs0v=8w@)TKe$_ zzr>VO93!H6Xm5rjlZ81Ezxvk!DQyXk=h1((79GH&8w}L>W7bBNZ{Zx%>%Ih52c5V5`ZsGN5F}lnS z<48~rOxG#%svJR{-|7_EZ%hoF-6sW#Px-m@=)gH7m~e9KQGJUB`dk|%OubN^PXxk9 zZTIF%z&fllm&D54K>%3AW(X!K>Nt$2QLr(pX5Mxj**r_mc5kRbdS;v`mLjw zh+Te^b|0}>SSXFwRQh1P8KceCGK}!{(e6FHYriB!0vnOQU<#Hujrf?c*w;v|C2lO- zp<@h>`z{Eq(R6WEv!q!hU=ixwd7RO|vHiWkmP&z?D?LKI3U|+E6oxwyWqY{58~~cI zzv_>|bdNGjzR&wsyVGoFe|=cpkvR(e7}_pJU4O9fruj&5n(A2Ab^Z+?xEatKyb`Q1 zqt4#+KK|X!TwCQGTE*okNxrKQ0aWlu#OWvt!xB@(b6CR}B}2D3uz{p7rJgU1=~2XJ zT_Pw{h(eR%oFx@1N%%ey9h==_F42H_ZJ#Uot3%!5kL`snY`h>wfBi-3aTO_(#XGXt zVQN?q-#==B8(*^c2P|&0^xOG#m=(A;w>GeJIPhbQCk~Abr zoh28_e#dG#&k1+%Q}R%Y2MUS3RAKouf$OgB{H0BR55U=#07NODka6Vbe?x-;Vn|S8 zTG@R~ez-%369`@$e~6M6crc!9&fkD|s9qPizTo z?wva$0#AyRWYC7n3Zm#wwpnu!oad9VfFm~1q{q~Uc7cS^5Ok_{16p}$-2%j2hYV87 zH6k+>8PiRjfBE($7sAx&^8=6@7ZA}M-$b4&)nLBLf~$!|oloUz9Te8X=+ju~GIW)= z3FOchDBu8@aXn~u@e3iaMQqL3<0GC^Py2J|Y_lu6x;88?IyQ4@+`b$VO01%XPr5Pn ziVr7Ep+VXWjo0guY8ns#ylI7T;^SIe$F^TJ%d34oeETF9*>i33dGEjD8$~Sdn6C?8%RzDYTQGP5Mm_#3*)#I6+0#* zJI;Csx5NYk|6x+637t2(hbqmsrjX_%C3mVvS zI7?<1e=Na}7)80MvJDrP)k2M*we6=VCuJX8IC|mD!$HYC7%nIJiLix1dd9Fc<4L&P z=|DYcmRUMU7$z-akrNQF(~UBSiZOL91l6A#HG?<|WqUphW0zk0IYH)i!b!J%E&X_ z1-jCW@M@2^v8CkBIQcGsmxod0jI8uPajjPQZnenD+fHl?-R>4f(Y{IarTb5+ ze^XKMH9U*VrYyK0NmFv_;Ng^5{cG%B`lWwHrCU{33b6Wx-2t*c2bg>zqfZ17`9uWP z$DbMq>mdt3us=k3ksb)#3CTT^eZL?Y8$46^sDJlW*Sv0Fj@WbfMr3J`I*<2VLj@87 zYkl@|3n)3MdUZVO1;x~XxbJvRWuMz6e^id?Y`4Tbv7<}W=faS$K>VJbTr5V<+yUPu zSRcJ74#W{yBl?4{YN@NC(Z;lEoiIV<{)n_YPD@gK&tQz}DZIf|fbEZ~4FqptF!=-* zkPfy=3`)q;LYUH#k?IT=AHkoMh-nW$@1YDi9!Y})3TFbvZyGZ<_B~;kTZVpue?sXg z#!B7SxI7!l-1$75kiOh&?_eD>?ML5+ShQ11siT-~z_s;kH<*YYa+RUNi9vs6s>let zLWp9zkgZazQM@hNcu-Q@mK}K$QQcH%p+5?$At=<(dfw5^zVEIT5t$@ILE8>j_lAAT zORcuk1x7mby~%D|jd&)nfz6RGe-Nv~tVDH#22RS#Coj?TIRo z6zd+?F{+0vARu@`wOLCFry(O1(0@W7RFy58wCb)ZEIX0!U$Q#4NMf@M*Radsl)(a} z?ln1RZam(2E@i^-k{*_IC6*rAFpkI=8;N4hqXbN`j2zx-{q4vN+HpcZO;<<=`0+*0}vGP-B-~iJu6}K2)zr&OY zc?_B|qZ3=ZlA7bhLKdzpe5LKh5(2(5$(mQ@W-a6K)Uo2Winc1?#(_}MKwr!r;PsK} zP0&4hzBY#ObgZU*WLp&bf8$SB`gPmWNd**TfV87kcF7C^2g(tLm!fxehB)i0banzz z$Iq$u)$BEw*(L4g2j_=W>fJs8{qWu_i;+@KIq)PiErQ2QV;p!!T55#_fDa%r@ws~9 zot{0jGo|W-6nVLOT}R@pN!ze-U>O$xUT>wKviA%WI&kelHFdige+=M57{W9>n^olM zHi!x>DyuH)GgaWykXDm@ShcugXGizw*`-DTs9Y)2xfCNRe?kSNczMtAj`qOtvvB5y zJ%rQ%t!1VmW!oVZ0(0_4t=a|H`DVA(E3cEFAsj0n^gVAF7UG}Y zRqIIFxqtoXpT;Wwf81Mwu013Y^zXdQqMVq6Di$pG1Oty}q}|HuBp?)cOV#*MvP2WV&;ZBN${=V;sbr+?VvmRst_}c?q!$z&cJy>T9?Vm<}=TBp@Mf zQ8_9jRm&i@249}?ox(2s-l1HZ+&33S=Q_qBzJ$PyHy+r9f6#${sRO_hA)AtMNx#uU 
z5R>`RYy|OtDLQK;i(w#$9>~ErLuh7ZXJ%&h>9_U*BrHR)FyK67D1T4 zy={n{M4MElYcl3(mv>rC%@; zt?~4Q1Me5V>T*m zqb_Hb#Uloy$ehtrpm{u6iDpYQ(T(B4PDD8$vS>SkxdRHmo^{1&*#GJ^ciR&DL1vc6 z`42-Y>SvD^>elkMaP{aS<)Ob*f7UsLE1gQ1o5UDqZ1yirXgY?s0Dc!=5ETS9vyfiX zs7Qa7e@arczQ}~pw|0Zqs2pVZ^b-L43cZ~!4l!)ro;4#_B}a~EC=|HJpG(-$uY{ z`(_G8_K%!>ca53q+x#Gz()HIZJzosFD5GvWf4T`ld2iumq+}KSFx2RqCXD|zLSRWo z%`p9bVF_M^=Z;eeJtvuVg+f2Ht2X&rSs^ZJDhc25D}eR~mmB3(O5E^KeBi}BGJG?} zA8kvuNSv2K>44(sGurw!pLm$pX{T+rchdsUphvsSO_D!(>{-v60^UUtmje8)%s-ap zf5=!!&GuI@0W3QuT4j}tZdxg)^GlkO8IqJrTfGzUz!E_>q)vtgF-7uuz=wt>$N4u%S zeaiLT;_mk+L$YH(URoKSUF3VBet^0ze|XXvl?Q|ds@+TM)v2FW@XRG#-*I7=Wd9PY zqMrGd;MxX<7%2Q7m`o^7*P^P(es^CrwXKS*2^|CYylrnA#ageB!re+(w=UAY5WODcpasFfWzT}O;XQet zBVUJyT#W+dmO?rTtdLEp)M!y5>-Zd=buo{d<5AT7^KuZz%Xdsif=` z@3N*y)4=}`1l!yH`$iH*8rmekG?5NqN>5}zmexV7Wq?N##NQ0w09w~5eC`J4Re?e;iao4cV zSa0sS8$pEy?_nsx273V*mL8Y)Dhk=ISIL4#S0W~J*uO*yryBDvPyQ19NU#mjM>W`p zd9`U(D%(AGl!?b23b|)n4<$?O6J;iyr)<|OuWz#F<#<*WKI~^ua_HhIdvms-t|*!W zwvyl{IEHdd-;S$qVotGXe}35NGFvVmNs*9J;ka~)56#;jh&Y~{a23$ zRo9g>JCbA6)PVtIuG6&5g&EGR`H$vn6PSKdxK7A$ib(GxcpM}~e}vXnl47wMSx9DJ z%G2_LH+F)ZQ|RU4L-clZ4ifRhTQwTwQ!!d8HMiIyuY62GLsXM`d}HeZKsDMPeMY`f zLIHySIOgI1UQcP2=@}sb)S!a@F*aVU%w54)e{UTwy-ClTHf13QHBl5<#`qVD2z5<) zI#Yz1Was5|8i-8ze-*hz5&++YLvUrA@nKQ~l>z%tD-h@mi`N<93;-}q0$VGqF%nX8 zU8ltA2oQ~%XWbDpi-~)Z$_GrfmAR{wkj1;<^?!M|42Fm#A7U$WR-O3Oz1KkL z$#s>8qQP_#vykVdhQ0z&pj*-Y%HB>KNt&>-7|95ct}@=Ie>4~?t%QktMNpu8pDZhj zYe{X{X3l#ySTj6j@)UuJlN6T(`K1)l6!c#sT;Ak#M$y*jXyvP&Px3~Bh#&&+l&hT*s;IgDnfgcd;KmAO6>M zCfX@pOhgN0f3Rsgv4+0k(qSr?zUTzTCr*moNROqOzlV|Z zNTa(aD@)&A)*k54^f}!3C=T(Tzm^_G6dW*l`wCt$%~|4o!eDRMS3MyjbG!-bW7jp| zOJK?)f}kEiRK~48wP;TTYxr${%nnYJxLM*AHdg_?e`?g4e*ArgNmP$Qy#Sgz$f$*R zQq20vZR=a^8@K2gq8-$r;ljlS*qyGkl8&IHNWY4}PkTMlX5^;@ntx0|UtxBN%<6&L~B3xoZBImGx$Y- z_1Cf9`TNQ3w8t-JIhHt=Gd%}PVACv6v3dUvmbhc8P2Pzc%2xYR>I4z%w z!Ot^fFv-7Z5P5P5Jw3Sf*|@35ATCS=wA86>se#nj=y_lYBgYNsIfgqp*k9Y& zcca(1@q4mCl%a8GTwi7S*>{EFD4q&xfDuyDRrI>eh2PFSF&3j3AX+E#oG!DIZrM4U ze;<&mupNpE+CYN29Z`x2pi9#cf^HFjcUv_H9w>`c^a`C*vFtfc^O4#|NI>;lo-4eh z#&*YUG(v*pjT$o|YIi|9a9A5%OcOvIU+NJhTToa`FXRA$bPpSPz;@r9t5#~rh(1Kg zy>ojEb5c9O+#`Mwy1~CT(!mp|sd6o-e_vHnfuMK@;7(;;dGm#Hjf+c**v$8i#^R*7 zBD}mELCR6CUHTL5%Ie>!#*j#@K?PETfDEOh2>DBV0$)3SSQsE1R4ayGRo?{tkDv2N0(1CXd!`19p6}?6CHu;e-TA3fykO|CW*2>HpqO(#dz6l5ClrOCxRFB>q^9vV(fYmMzB zYI~;eA;7p<8yq!ZgJ1Tujjradu%Ag5ZW3Y083x6O!;NHNn%&@d?%rt1?PQaf~`VtCp2H-+oH26&*M*W8;^XlE>@D-J4s!hA_fJYxy>G6subb zjj&jh16F6wIh$?qhWb(oOaJ@Dq3B&FNVqbgF+DY$A`x%3rZ{lp2}JB?G6PY?0B;hz zi#l>Y9hj=%$*Zuh-Sp{xe;tBoda~N0nqV*62KAVPhIxJ!q|li3i^QDC3Y#@CHF zOSsmgappuAw=-?YKGX7iVJVk*qqxRK@;_6FjZq2kCG94&DS4Ylg~~M>my>ePc)<7S zHA?e}(3OdG_#XEq8DH&F!tz!Q!TY=CB@tKqZ)j}0C z#>wU>Pb|xt35pLnEgqTR!uMkO)d~hiXvP6FaCqpPQg$O>voOIOTae@8ABjkQsz82K z?kmk0ps2;=xZi=rQB8-0>$6THe_98Qf9qiz=DQJj_nv|D%HpezHPH#9aT{u@g{5;e>raJ5wK=u;) zSLZeaea~nc1xw5{2HkVK-=ZGf;h0NCbyXDtTHVj+re=Gb?TVkc3Cy^9oqKC?gUy0Vz4#{}Pk8U&ps-uT==aIn=ylZZL&uNSRa9OxV(W zS}hvj_OL+G6Ip_WuCp}!C^g&7_eY1|-u9HgWs6P#!%O`eUKZg*a|T6L{_YFEzEO~cD z-hM2T1^!QE*_8)2$00{_0poUr{6OgP4u4@KShaQ8-;vfZu~}U1J7Gj7AMdRF~WJ3)E>WsZ8cR)_qXt>Kp*`H}3xp=YRySSqJcU5cBr!4Ti#KF~i3c%?F9^~B}2ubw6ak%xj@ZuL! 
zQnj^`hKN8Z=<~&@mOIhDzWooh));RaxGkQ8w zfbZ@z_uC0_IqB!qD}ffZry3h0m&BXdG4_K^?}fTvgY37$kg@IelfKiynnen-Wbr1BKhktZO=!0@9hHiupvu8ND`#PjuD72H>Qvl z_hiFK?>nBFCbX!!*NSh4ADF*B@0SlQ1*B0%jDs0&nJr>7j%q&7Eg^b{448N_hCE~^ zFG;YdK<4`U;Ah&(1f5XH?_>5b*uoD$iE$mG_@D}kELiGAADiJM(SJE}Djk6I`Vg53 zyJKHP8b5xD-vDbJc+ zf$YjIN!5l%4&9{)zQgoltE_*yd*fbrMA0Isx+dTq+uF`nhEwlJ7SI~FZRbf{DoA2b zwA^s7t(nNd=%~4*+73Jv`U4)43O{1;RsF}jz^vY_encrh>jmj<;8~q7)&|M_E4XN{ zN)st0XIM5TL$I>u=+xa)()Tn;61C&G&C;GEHa#1|@I5`Rm!my_%liJd)Zk8=qgl2o9T(@C86VQBzffGSJfzO;a4`t??_$cdG> z(Bb&f=p1t)iz%rP79FJdK5bvMsYkZ)9triC1{`J1o9{Ykrdl*MCH$!}oSVeR8GpB?i(KGHRX&KE0neaR52 zNPnpkyKElpvhcE#Trn3|FsX> z2$9&bT^2nQiQbY!Sy==Xu}QG_r<(q0Lw^E=MGfgzAM3y!+S})Fbl1754(_HGSMp{o zk1(BTEcQKW4e>90JiW+Fo0XL?fbuXb{CcuLY(AO~31Z$u;ur#Jx(|lz5kc3gMb{C_ zcr?}_x^iK0Jx+(-KLckDvS3tiF2Drns zGgRf5n)r$cp;tUBxa0@$!yNFbs;>&iW_6-7z~84Le#7Ymg1B2lE{Ebk*>(|DnsY5mD>2Q24Qx|Kg&W)*-8Eee9aC$;O z?mye3=&$D=-x`+vF9!p5gDEQx3;hlU_T}$>#)D*V&)E`Wk3C)2ac{y8=YJH^Y_#Vg zLSUbZjW8GEUNBtnEExTFDsKcVEJk0r8TvKAPP|{o>@`&2d@@IbseJTc@5X`~EP#-S z=O>#Kbs)-w==hY3lx}+R0q+u%N-ey5OliQ41(%A}gQv!b>&ve&2b-Pc9F%(?6ipLU z6s~{OStie8rXj{EJG(A|@PASY;>0m3us1aj5A9?V%A;|1yjxr4IUr8wO;T|5L zlogp6nmRN;s|Q-bjgcgDu_=-qgh2q^FG?dmH}wu_l`? zV;Fa_6$m;WH-s}3aggoV8gL0HY?6Avp1p@Cm*|huMH=f!mqBS=$A2508ra@&o67kl zIl{!2{^l^iv8|qVO7+8Lc1!o)>?yNahwh}?NY*mQ> zXmhaLu%v_Q>g$IU{MYG4n>_0YY&n9yB0CSr4dI_?5^AT4jbp_ZKTLl7zwQ+wTWMOJ z;mpx%GGYin08K!$zv}_}HqQN&%W8JJ=oCh`JfAM2sO74re=$s1>)L<6k@`|{R@GG- z7Dh#3hyzAe280%IfTKOam3dBaxbVB;ZXBbj81PtKThvcizpwtaGEjejk??h_u=tQ= z;Gpjmg7`xu^5C<+3KOU!1@1|e{Bm9^daz*G3VuzXN+L=;lfiyHic^yFX7$OCd#39_)$shW>kK@RNdNqv#Ebb9* zpk_~7wQL^wJGHAzX^9939{*N0n)l=A4f4g#vTTe!JyiZcMvC!*7|C1vtDzv5Jc;nM ze9}#fjyH+eLK23b#6zg0Mi7&=p~w6RTrhu%P|HuZ8F@H<`|TM= zkT=ryw;M@m%@4wakI2WL{ucF2%czXUN1(pvYe7_b*cyC}Bcs$fj;8IX+<1O#QxTXw zK)9{uK)4>8AL?gGn~1fz)QM;{hEq;?=dlyqb`)6zq4F??B>XH+rAfPrV87ctNYgp! z2G04lYQwB9r31Q?jZ`R`(@W2UBl$_*KS{*2;@gn85ouH}FM> zh}g|2R+`1*X0MZ;cNI{L{a6ycivIT z9KTO+>edo6*^?qcfu!E?OwsW03fT*<^lf1r@7YtbXCs^6X55HAPfwD*G6eT=O&F}BK2po9f@nEcex>EGtQS#ac)jW= zfX=dA9o;ti$WH&kD}uA5Ed(*>R4g}e~0mLT|7e~KTS05M%9 zitgMcvf?dAEJsSHSttwy zBnp2BFOp16Ib?V;h3;M9i2&l|nb~6)CL7+f;}JnUmR}Wip_Q4_8qnGWP*&n#!9H6U zjvq^(C0g4O14v2OZLctVDr_t{N+e*XmPvi5V!bBzs*P^hSWx!z@0sR5;Z}yoA2zD; zu?eUi<5SEg$LU$h1Asm6xk$b>-MxClT%>>gEq1n%&n#;x4dMzo`URwX{}SJehT&$G z)o-Bs(5c1~@;gnV5K{RPHB1?x`x43{$l19YFNyRyWqLv3xE`zuY9j(d1LJ0GaZCsf!j(LBZ znev^d$xaw9Q=QD*-p_M@vTZ$IIYO8lL~{Gd&_1MQ-xTv&Jt?X}SMC7C)Cl@B!Bqo* zP?FZ}2l$j)P9glXhEZT;>)fsv0T8kgJ1o$0$_Ygq5tIUI8*mQW>agc?kQAh$iT^~) zqLskGkAk`pw=15Z61Zoz3aKOjH06KBk~W}v@hm%v?`oFk8P`ntoT4H{+A~XDXdr(Nr*nxL7ttqP>9c-CQe);cuDkF>1BnxW7w^U@7ILkB;# z*L#xNMn!po5y`aZxHKz@hA_V61m^}8?jtCD#-NQKuDN%BwQj4sSi^sgp{86?#BaN$ zO+DsSUG;}uz6)U+*8e!Zp!@7s7)BmFUiGwxs=jk3w-C1b-1p6lq*ep^ldA%Q`TUSZcAvvM9hzr~$MIds1k8Qf&=G4Oa}qXE*g1&-`R2#CLzPD?epx@~oR{8q4;I ztK(^^+xDJb{gdcb%a%0*?lX?L!Ex?dI^z-HH3s}f4!X(Xo$lgSSw2ygmGw4{G~-!N zY6b(sgk?meykfPyH_&VN-CS4YXDi$|wIJys`SN~c0ax?H*>v0Fv8{V`tEOUzvNex6EpyStMNm6NoZq%n6E zM~|#H!*rKB=UB7ShDR3OKiHCnj zV&K@N#Pgyg+R~}Q75hNop{>$|1=v!4Da!MshrTTgPHx%mFfbNDB4tfn2v+)uxMp%7 zALUtpF~y(!3}X_5#mukY)WJFk;v^{m!+udeWu<=wWepnJ0=u+irR5$%0?TTPEbx|9 zMPgwn+hCChl}Imvg^?a>r1m9W8!B9GMus511_@-j4jlBh;*eLnlHUc?Ua!xS`zQ-> zxgQn|DR^k$5#6!6?uHN*{=Hj@V7(Q$MSY8ITiP#P5u1Y=cyFPCmxw(1tEn{@j}xRL z=%asoKw8lV9#h-J`rz4H>WAy$whlq? 
zox`QV&lwqctlM#mW&CSBRk+p(7XEPi^q}pEq=9B$brm7C!6YVF62DI-EwP%?3BIzw z0%H^liFyqf!plL?ROLq26LnfY&G3C|RL znMNuRA@X4Z!{Fu+cND8&ctKf_I3gb8&9>t2S>o~13L2|?s6}*1%g8~wnc%|F(g;1b zB$WCG^B_^tU$c`N{`VgWtv3|5=yb|VvL^#n9nXY6?uN&ELvuNXU1}>bU~~^b9_4>R z!|82RPs~Y9m;t3jxXAq$bh_Mq$)wA(P<+@Sqg9g?n!;9@Yk9KkV|?Ptj}r0^^GbO| zxJ-eJK)hG#Flmvz3*3zu8}#do>_S(8j!ikur87%>{F|c%^~QjEryzCi;SO(Be8yuH zpJQ$6cohfLgujI46#~9zFTcx;*%E)n6@g}DL08^Wd&-pB5&JtXfsF$LX8MipFnm+d zxxZQ}7QyeAOBM1RvFqbx=xb4+Ub@a!?Ih|J+I5-cLBlk#h6w9yGfr83{5Id2q3Oh1_&DP{A_rXM@Y8XjZjwZ7Ql3;fwBhos^QS7*dP3}H&T z3NbO;4DlZ5$#QsHp)3ktypQU;@rKc(WnL z%8N6jW{$PrR$tg0oC=@`|F(Y#?14_0ZMou?tshEERHs?+64K;WPlrkmZDmCj4>VvzVXnDqt~jvx>7O z-n+#QTP6SdjY7uf=Q&Ps?_R}6xP1=)p3ra&0&joK5wSFdgi{)31l)h4+5UdNq_L2( zoIl6~D%r}O@pr@Xhj*c1>Z}EbW}%b;R5*d=uS7LK2){W+x<5-dXZEr~MX9z!AMNmL z{wq81O6s1W)!x&zEPATU`YmgX2FsymmR}p{I?an}Qs|4FjZL4{>ha+vR-=(YP@Uz2lzm- ze;-$oFUDutwlag1zz}o$Q`EQ`1E0(xn_hWelF~A>hQ~pR~#!vNVU$6BLI& z0XN_!Qk(-N4GHDMQ$JOLBcI%WlU~!}rm>{?_+Y7$GLk{7;|B|r$dPm>qIv&zn*r6tsV7N5PjY?JC@*}YU@_a= zooqnsxPN~W(upWQ-4L+o$n0tbI(k99!!_S)4#!Mb`J?F;En7EOpZKW_{)$`WBp2iv z;_w_+x>7tWmdRwdjnl@XJLq?gr2KBRnEIKJfxL%G+WT!vuoVTahX~3AASPJ6KuM4j z;qOledrrZi!LQu-QV+svLtY7$i$Ighx*Ty;?dg9*Wa`y$nO4J}_V77ta%4P8U04L5 zyv{$=L0neiWMs=f^GbwA*yAh%l`+I#c4XR{Q+OZU%u@WqBEEVz-#&Ygf4y1-;wHbt z&x2Ut+*TPU{VV~SLTx#>q1pWXjn(dP4X;5z@FU%w-Wq9j%&#gK_ESkU&UE(^&P!@% zSZRNW$axkuyxt!OcT_0`V4)Eq><3MDDOE^0%|`z$r!pH#hvJ6BIab}zc0;SFK`nDw z{#{ljWb!O_reI+fcIB;(zge__{#G2h3ednr?8E^@KY$1$oycq}1^^sQQw=y{SYs`w z45ukfUpo#fK5`zsllU3NlFKXP!R^*TWHf(Alt`9V1ogvaiN97CVf4h>xC&R5TLpFR0PCq31eHcDk%*F*Mz6<4Klgrbma zA{gM&S!Zu!@k6NHqlIpodS$Im=&9b~a~;<-Gxm$eSHo*bZ?lmi&xJFYwOHI)CS`vE zzyLK3hVN~Nzw{gs9MS^6VOy08`ED-BPg7Y>ohEG#t`X(XTeKa_v0uMis-XxILO_Kb6TGhRvuQ{;jVf^Bt;!NGqhvAHCO z*i}p=pg`{*E25gESF}MKt4dUdX;fegbH3^@3g_tjtcyS$yoqA*Yoew|ITHXDF9GO% z)c#e|GYrW1#VrC^;3dOG;vOvrTMho5IpB=Nw>emo{synUmU0=&65mSM?G=SW7V=ru z$)1EyF~04;CL7h-TN^sUMRb9; z`qVn6l)tGcNRwppmZPpm!wQ^Iv2;7H@Lyz`RQ7J8%$>FD>_bQ-(L3rU8q&ikaN(>2 zi(*KNl#ldwEO7(uVg-aQ#kuXTsRx=GWoA1Fs5E}^urbHI&Iw5`;diy*i&Zd9FMwMb z!>z?g#qd{sO4Gt$K(T+^M<^Rz_JbB!9Nkx=>Ek=c4dseRyGJn>({|f-`y*UVs(+19 zD$VD7EhfDW&R+?7$?{v#;!i`)7fXL(kB4uQJfc#q+@Me6Pha(CJ)pRxAm|3NV0TPv zNFr)U`mtZwu7c%Y$vm0yXqVLY zE@6m=rA0@x4cULNBANM>@&y3I{kB-Mv<7V+QG~i1J#4H+D8X+*ZM(XyDHNV>qGp6@ z(3iLlvq;zov)$(Vf*Qlw_j>(9Ag#h-w_mjyNhyiEZ?Vx za^+GkT)%NlVgI=+#0G~e$Fq~A3|0NG@=*&AdUbS%$ryi6-*6b&MKe(K{(B8XmKeOp z^a%KErXctUW|uWwe-uidh#(UUo&LCxkCq@BiYm1@R}wekr=UC5Xo-Ejzj^1Vccd8N z%x_ZePG+o03OFeUbmOFu=64M0?L|6;(Z-y}4)GCiWil!cbqgsAz(doLAJNW(G6phu z0>_4q0y}?WV4EJV#|wNKMoA_QiY`mXjQH@~;vh1NKBafLXQBs}6OLTpGm_!4QdIyz zJc;-Vv8#uPePR|Su)fGHMHKO|Tesniptyh`Ga!#pV9zbz)tdpfC2`eqUd%zgvVdsL zJwFvt$U8*uh}Mcdm9t!Tji~4D=kY_vu7ZVkKPZ1;Z)68|_yuF^s~=Yor<-2OhR}6Y zRzlTS$PH7}NS@R^Rj@<_PM2k2ke)n=qbK7>P%0f2PAL+O(L@9PgMVuK)~X1!+q$r= zux_owAwF-1)O1Yu@{=$zTvLNHem|bf^lnazp}C?T(GcF_RNx4WTJu3Wf+2SQnZ^qo zpiO_bTSN0~K)`6u8S{AH2N0@0PnnWiD_R9$+!OYAJf7D#PHNnGz?-fc%-+@TJCViW zXG=ueUcvNTqUy+KQ~=|h znlztK=AZ0AV^I+@gP}7Ia)^_3#rJ^MzWaXw>F|njYVWsMt)Dm=HnhPAgVuU4p_KN8 z0pH``#hAfmIQRbLX~xu?GF%2lBi&I%mpL1@ubr$(cn?*f^-bf$3FR)~bpb)o&xsH; ze!Kcf1J3#V_96@xQVkgz!}=qAQMy68pt~finqw97s-!`2+C-YbRf`(O$KXwPTx@?` zkx%aLRAJHaq1s%>134%@kj6?WpUwQR0L2U<#M?uwwB6NyRJUlXziRBD=; zck#MMh*GIS6eN(GnIS%B7P$7<)7XE`>XNFdr7BM6#n2|7tjhR;#5h#p- zXIL#Wd@hW!H$VD(TVG^{6xlj^>6RYgNPh8irdl_ zpcc@+&Lz8|tW2{Be23*PjX#8GVWupXX|0gP8ei>mUp*AIe1zTtgzgy}yFd31uVNN_ z94$jFnBk&25m!t`Sq1i;=OuQRfQ1DeL>p}b1aCuW&p=@mFVyaq*s0UU%1ZxPbUB5u zLz<_zvn(+GDA0y@51gA9wQPTz3*Kzg${Wge2tw3pbOUuwdk66=4hTXs%kr+GX||f` 
z=maZp&Anb&61G4+@|Tj&mj)3BBjlcY*O_mGfK!Ggf~Z1eBN8G;U16PXw#ZO~t5STv1ULi)T#*1e*s@_(Xs63YJD?t;A#^ z1S(;}k|KrH^myNV@GDsZL>K-R9s{XzR%$;PjCTxajpN7hLl3-Ab@8y7J8=l(tgY%g zQ6OYTCT$OV>!N^67jbX@iB zsHhD%mSqE+vI~yvqk(?{8m_1>-itrXHq|9sZNitRZmG^MK_69x^2bVTg2MlrG9;o< zsqGA+Zg$4fNvuvEaU-Bo=s9H~pc1^;58FCHXep^3LR<~~t1K+32X(vP5^K!5K9WV( z3Ap|+g)4-bLuc>4)6%i6(yTXrvc>E=Gk{#KjA$ffFYFNpDVcv*>RR+Z*8*o*C$MmM z8f#mxGnjtcMCmYkDtE1x{MEa9!D{bjTQJ4ZKiWY0NEqG)L<2#l-+|hWw%WVy0i&%< z!TUu$v^HJEa51H?N}rje_Hnf|z8yoT1O#s3@@tSiwK>DC1rXPxVNS!%i3oY!37+~y zK-x3|(HG8NLr{NCm$*vqtVu&#Y&L!5NvFY18F3+LtK8@-g1*M(NV&2K& zL%{=j)_HZiWKM(c4(eyh`&|s`XLcozYW)OZcIJNQmU!j&#$(+304QBuzC>pZA<+|a z`{tTD&r9eUkn3z3yg_okqiWxqy7Ozp`H7A^L+e5k6}f+Q0fIIr{H_(adU5}v@?2XC z_dsRG;XP@Z2ReTUPo|-mr4x&nq73+iyxBPlcWbqbCvqeRE#S-tC*}&rBsIkWsx-hX zlGGDi>R~f}q-l<7L}RRD;zm8b&`yrvu1R$!wCk zid#LQ63k7t4iJxP_XzK0h~FayJmeXFI`3Y^5|z(t=8?xRU5dQIIcb=i0r{3`$3E~U zJaS)~iPmVoS2%9eSx@My+S5ZX0re6y5zpi-3H*Pfdljeb+O>pLruPEfo#>v6`S_lG z)MHe+j)3BtKS;=K70jr|@Y2gWy0a)@;L*_OSr)#$cEB)Fazlwz5#kJ?Wz%~7RJmw4 z%t||pxfL#AzWQY1oavgV@_uwhyN5itsbBYFw#l)R#(|@Wy(;l({rS#8~uGXyE zpyU+&oOUJkB}xGTR^c)2H^r`bY~3$pR?M7T1l>sVw0iE+gQk;9Ee-Z8k1Xu!L3H-s zz(-%h8Kgcy-FDjsOp_yeJvsJ7V}-wE-+X_Ga*MkP-WIfTa>!%Ye)GJcgL%i1E)ME~ z8X#w@eK&_HNj;5eVU3WrI`Fa{UHMZlbt~I^dQK64Dmq~dYS9mnYWR<-fLP9nd28W- zt>NG&FF9(EzK8XPr1l_g&)_?^Kz`XX;BYKT0VNawHNYKy(Wn!MtboiEb(M0 zT>_<~@!O88g={#ST34P$Vi};$mjS{XlkO3g+ixqxfMmqpL0aA~C4~=p4`_jy7YN|DoFa1tu z2q@FdYm)6|x+FQu)6lk-QhqvfV}q&k8Ng-({L?YtWVl;IGoIn}!Bno{g?@j?Vnmn5 z&ND%Bmwkqk>T}wfVcwIGIE9nsBL1%T{BhJC?pi5pPGRzqXq)lf5tcKNKjpJcwzXG1 z8^@Kx>YyS7$pr{OZvsm$=lC3>B@CnB^}5R_rybMk(aYng6))A)$zUy zz~F5*b)sbcE-`e&{@7PG=;MEUVk9t0*8x@w6YfMdtvt~JKixxFSA|%jbxh(=6YDwp zgr$K~|3$y;iY@)w6&6`kHNVSNUH8RZ`zC#p2wKDiC#@=1;o0S!cUtv&Hi|782dc5| zC&xLXfD#5L0G_Q8-ec3iGh0~tBXINCS0q8IVwuX7jQ~MjRBTwygr8LV0iSxA;0z96?eAo}b^MyR_NN|rT zfAs-fN^&EsEcYVguTQ*yFAocQ|7S#B?|$03RP7ZJm}v^s&#=6WnUa{MBnv3f71Gwi zJQXHgQ54yAkU^?&gOmarWdQ<7BC}8L@+G)uxwRS++|Bk##M6JLC#ji8+c3kEB5-1H zg#_Wf?{@YFTk=9AW7})3$>#TD#0D3B?7nUQ<=U#NxQ(kn*^NM_iVCY{+|W-(*_8DD z%lTPLN{B2U@9Y#=r*Ds?l!s^Zr$09k?^rSPfzr6z2vbjcjx~@3GS*T$m*kD^g>}vU z5r3ti2Lx6A^BjL1>xdVamBm)$A$SSLh(|FVqn`>c`!iIgBz}5;*CM3etmA`Jtx$p$ zQPKu)f#0AYDBVxa>@?ZSB_iag?vsE8i)1{TWu+^dt`Tor-&p7rj^Wa55}NNj(B~G0 z8gt5%AOYHW#=8?6njnJaZPRwcH;lp#!y16n6D3JXzx{tfq7^u}vi!{Yk}3FF-0T}XjHp+}ds_U)Ms(xcX8k4Z-LZAVE_*6R{DM{~c2hxqyG_pZKy&8nfk5h4A zpX3J;czjYa%cstn{QAF46G`Bz=?m8rMHBLk5|d3Ao)P;Cc|+$6sw3)jR$ix9CZqD zRO#;=d-@S&-~SjUa@?3;p4Wz0fTj_FpcJ;YOqwR4zt^-BP}Rl{Qf?CVG%yEmd6dxP z?OQ7SF-Z_FJX)huoN_fNZM>nkunfX-$W$Y`LqgJ_SR^v)At)5zdP^9c$U5G%wm)Zv zRh)mF2LybhdV%XA@u_Xs#V9#D+_9;BRqqiQf18JC1j#eU$!Qp?n+Ia^0W5tUqij#m2Yzq=WN>r4%Y|Ub1 zWq8_tXhR=xDnMs+q3J%!+fuN4V(Hu>n|l}7B)RlV7Kh-zb7m`S+q#ZmER@zTqy%w2 zctWbYx=^GaEg?v-BO3!W=N?{tUVJzEkH?0J>B&d`DziB_gdLAs)thmFR0XFv`ec6( zrudsb|27il_J6G;szOKmH2zyR=XHwRKLJFF8VvSa6t8&ESkl^JE;Pm}>Nqggiht1% z*0oo~E#l)oI$@x2iQBCy-8?MXD?s&rMtF zROa1KRl+!wS8t<^aK)srT>EnAawtrXuG&}wx}4?7TJzEdAm2a$gk9jbb_&9ubhS$b6*L9;AxY+$!!aNwC;j#UmTeF-QI;#Lu8@wm+Y_kX$ z5qt^uVVFge*9Nhv;J-mr^y$64D2m`bdOj4C$`l~Z0#*+SULchw-Z`$T;MC!AS&#C2 z{S{WmJH2Wxjl%5u#TRs4TGxN=)^ZecXq(5GYuyXZ+pnVY8Xm&(8KV$e52uVa{{=?R zYjSGf4=6Y1ZtV=i3v}^DAdLn0#fzW1kkmFUPjI0;1BP(IIqej0k8I5N5C)?)K2CqZx3mvi^BN<; z-gB^}2GzecCzw|m4KNZC)@yg0)Nm!DNEZEIczPW$x>ekZUM8UmMxH;Jgc^gmrHPyk zR%XKX^TS~Rj)#ovf@e?bgK8(#lh z|E{$3^$Wx8hsr3wqC|fHGOikoWHuhB%%+Pw>bFjVDUvC~`fZc$u`YSj<1q!MZ*uX? 
z8c14?(L$=}V6fODU&uor$r|`Fi!sq#@D#W}8M8Ktt~^I{Aeg6p=gl*^^rR3Y*xrqP zSk6gfF$A&6;=|hFHcw}gxvU+=Vn%^JSG}=NWB4vN%aP5yMWmwUv)hFi0 z2><(f^SO#|7+Q%rewc^N!d~Ew# z9E6YM#WdVfoH2iFd12K63KjXE!Z_jrlSw~ZnFnF9X~Z~4&LznyrT8(zukl>Z>k?o| z3cO+F2|+b5{?XLc#)JP*Nj1P=@o*TqsU+DHQ9AdSzTHONV>_+4r>v7K+(a_9g=pFg zvEyviHt);+%y#X=t0DKiMCJvY=mFRL4ERjWhpN>lTvLBpwhS={t)5MZ6rNS|*a&1D zFgI&qNpvfE9%U&9K>G3}0*9K29NPb*HRNPiwQL{p@ z>Ewu-ule4O(Lt8=l_Wa!KxHV`+KRP{66fzwjT(P%GC2vBImFQ)e!8HS%_CS%86%Vw zU|qLwOKEB&uk1qAr;%>1&$YIKTF@b$ovkb-iWo(O0vnc(VyOM&S^4%OfIZK7SlFx& zsBwo7ZqOVyNcf;+XrgsMNnRhXRO>G>-CyPOVoPfy7(VC2Ba zzK4JL9VV`M%TKs&Ol3;*JJrFJU%DdteU??4ejp&JA{B<*XoCcJ&naqD2IeSdU{r=7 z7hB?|Z=fyOF-8F+_w&MbgG;e@piL2%Q7LyFb($7l9bc*O#2{qN7JB;A2`B;jyt?)} zfOF(wl4Al^ehJ8Pna?ex1NT*2K`>GO6VrdNPeq?bal?NjrV+q`VWGej>G>B@(+`Ne zwuuFJwSK;9>W&wz_JZSHtKC|nob}MuQg*Atx+xv>VA~5W5DOL27K2PZlepLiCiLDv zN8TrJ8HA2zv++3XF8cX%{|>)>8mIdb>gHLqDa4oosz-$YvC~O6#7ET{-@IZRRE2-c zzROQfB7pJu%4#;fSR(Cz(;K1&7nV`V48rX)zy(xBLduUZRk#Vn_(2|Z8&kO!sdrl6 z^vYbhl+{!xBO1`gD1D-qzXl#CvUWV*VLTD!12iiXUox{H$JReq~S zMAYO7qI%bkQ7f7pf{GN|fz%AKP%S0beC^lm8y^~%9h6gy&k8SJ{&1C&Lf?LChYxjr ziklb{k8ty~9wh6;?1nimz1+CK=}YkFNhe?g*doq_PyFiZW(FwDXDPW&356R9!(5*=a8%v0P@+@$SR4af3 zHcov)=3R)&dM>BVc8$3bisOxjT!+i@CoUT6=I%)*Oum7#@t5+zNt9%CnaN09+HQceawb;PZ-|`*cY1FdH^V+s_r8K z5W4W|+_XaZS%pG>2xagRi`kS18Rl9sit$|q+-pcf<@qMFJfc#>Ng80^j&82hjNRJ8 z(g&apiM5AxN2}jYPh|NlZ084mi@~*TO$ut9mw;Olw5KN=tBb}ysM&ul*48USsxU)g z%oi9rwg8=mMr24IFmH11tZn;f%f9eE9^?>D8(YO;=An`8TQSnTQB3%52X~&_v&H6m zRf|VQQ=MP?Bz`cq>J3|l0~o*uw)n8l*FB6hEusuzNc0N%W&u$kQ!6r`vMyYGL!;;; zHPAd_e(`oqOo8?NZY6&};8CB1co>dvRd^1IeuccX-E;-cD%thcJsduyQ7mqgcCF|U zrAM&~Gil#nc(vDcbyO<5kNRv6oR{3dIV!X% z^VW=T4uElP8RgL6d_swRNAgCTmLy0`fsnn;T_u8UkK2A*TcLUXLB|%{zia)BeRp8z z%)(=LwEpWoOjjt`aV*nPNO#N1K4*afA)zq@ht>c6pwNHaII?0Z3K#@$$KDs#$n6^& zbeiM<-p!K&<9@8>?A=22ZnM($_+mPKFv4#!%s>W(tD<#V23r6Tyb5(380Q0lS`bTQ z`5OZrr&&G6adoAJN~3yQ8o&a0?7GZh+@#2f>Ue>*wb0Fy=v=OPR62|XEfP8d+Qvzq zSxA1j^Jag#=6M#cG8kcC{h1nFUXd<8>+Hi7c*cAo={Wew8QKNcDdqY_K6+eIKWeQi zIS*yo|NX|KGKbeGW5<&P&mp9WAZC?erffiin)UYe9IEZeb=}?fYH# zYF5zo!r|o^>v8*z?wD4a#%DWn&23pg19X6W!2^E~Pk<&mGId2&7aEuHLl_Jp`^ug! 
z7c=9|8q;)y!YCIjCqaxuWB`qb?Zp6UyDd}c9KqgUhqG=)zQgiQ9cK^*d3BG=bJ{H=`6ADYM zw10p0c@2Dm2Bchu=E?N8O5<8C4_CL|3ytm5(-`;GWif@CGR3_GPSa{QfXYBo@yD76 zA~RxLX@|Gv==GzW-?2&d&vX^A0Q@f%XvY629K2=qJ)Zi-?3W7W=Uq$6F53>G;E;<= z(ma$SE~SbCBB7_Hh{ja_{c+|n(a9d*Q3QXzk@UJl5;lq5yf`pszn?eMu11G$zH}7G zM*Y(A*1KH|Q2);KYN!S{OnPAfRr!E>`~bPdCKRAS-G^oA;?HY%RbwxZ$)FHxNmKKO z+p7M=A1tNKTA<9Ap6YOBhXf9#ysPf>_kGZ)fLWCzTf4Bn078G%r1YB zw@>#o9@Xg2Mxy8!$?kz0Uu{x0UHTm)*=MNy#qo7ob>GkA_l`ew&{2>kV9M$6S#QGf z4YJfX*6j29m##Z%lwezFFSHOfb*`(O&zR^u-8I2tZtvW~YmdiHqgu}yM8Ddo1~){h zc9D$qwZ__uB8vjF@<$+M-KcYhQ|f=Zx>1n!NFn;U17>~ZpMtMn>UZcj12o+u8xi**;3uII>vGQ^p(_m zNZGsYBo$*XXg=UT_f%cI$Yn2yr@ogy~?oLW`fJYc5$pw>+^xQ%e@r~LFc zSGQtGMf;j{x%_I8Bz)-iJMMqjqXDO^_*dA^dZuk#=_2UyZYi9ozv_JDk7z(ajAJnz zC&sNy^Bt@#=e)IZF3U=}8iW;hslPU5XC!jiGLXhRlwD0N&FgH_*4-Wm0LhlgLfo_$(C6Xw3iw<3Z}7{x%Spuo4cTJr9>va2Az>kUcpvHf8m z;bj6#hPhKKPm{{1uosZa@^G$DzP4%%mFAswTMH?gN6#!Qp8AhT z#+$qAE=%V8L^qOahB|-diVo|?ic+BU+iwS)B3iufVrRau1yx6s(FzwksNd_YBWc$J z5|yvdO{R~I(tGJTX&N%W*R^+xtPtMAt=DH$6jd;{K1AOI<#|`7P?BQjIWzxuK%M%S z`xz!o@GRH1uueO=WOCWpvhL8}i>7`^GkF%aV_d0oQBrm)j^Td+iA~4-@y85Gc+*WG zQQ5u&<%_R`OH?N5UbYjSMBZ*eXrOYt?OFKqOp6mlPy{27Z3eb(E2=AZ@wa_(B;PJI zkt<$YIx?IqjI4jW(S1sP>gE$Mrv>+Z%)V@8S4iOXcm++?=lb7Vc@ubmH)mqRt)k$4 zIkt2pbY)oZdrB#YVLZS$B(S~+Eq5?+$N^kmeo9l}R&%15ryELy!|CMGg@!A{!lDmq zUbkQta>Oi|cSZ|$a11^^>}L3`L|6t!3e;@`?hY_}Y_5Mp2OXsWHI)6_a2XGNnkNQa z#$he%N8)nRO@S1#4jCeTYjc?1Ms+AU9TS95PhfRdzM?Bl#L1GS6m7uc2Icx7sl{wt z*FmJe0u90$B5_ntZpr$x==StxoVkeU#e71y*PXxhH*oh}lIIObPcx!dd@?bQ)!wRJ z1-%hBI#GXYhhF5BD|3EP*9DoG`NdOf3u@A?1m!x+zI(x%85K^Kcd%5cwT7Dcg_kQM z??;!0 zR+uZLefUVTy1-9oGjK9txd&)c@PW|qal6BkaPWV@7iK`lFcE_BXT+QInQgFkh{Zdj zzA~oJTh0}}S784GTrj%ZBv-%;q_}H3l=lOSSkKDQ&zr)IN4D&6Yxg$@(gxSJs~)VE z?7(a5m(23RmpKejo=U^Yx|I{M9Gco0NdSUkP?^2{fxl0bnLI>i^f*D~16EU*F1;Bqm|Fy}){m61ZC@OdgR|yigM+qY;l?i!LB|j4@VjjvfwRK93eR zT)owdo3iFLPlA8#)w~@?3o>cD>?l&{HkEws9C&ErPE5 zDkIBB3QkC{FUnD}4w->C5_lkwKklyIpEG}>6WS?ti^9&^)`9gqh~}pRhED+_lS?Xi zNn4@2-^leOd{JYVkqpP!ov3HTE51}C*N*~^=5~KO?umZ}HsjNEF zsdaSx&`^$Qu_`k(n>!s^&Ya~Md}0?}u*nfdQBQJGLK*B!48${-x=cSBoGr$RH`RaE zo^5X`XCUzhPw`73)P)e_Q!+Q)1qUhyJPEUI@U?7jEsaMGT)?cz4-|UiAp(JdIXiHO zct0~2pKKkEZ8g(jelWgDw@dj$uDcdKP=Qo88+ZI1+X;t4lZj=!Id7L=M_n;Mi`YQ6 z&klx4IMd4YcHKz%eil%!`}TKICX$A7Yf_Z z4*VK<~9xWj%(J%(u*iJq*N3$N0Zci4YmP5U*uT=BK# zUuu$mae{bB2kB66y-SdCeu((-nADcuESZv{p!&^mso>Ep?&A0`RmFLZzW2(VDIE@B zZ+E$#?Z$k9%3up9f@Ij+L^Fro^mLN!@LLz9?&<7oC3{eLp*}!W8R!XJonM`QX18g+ zQl>j`H(bH-x%bVkUhsd?vc=nMaPu~!9p_cfNgi~|KA*K=xZZ(`jFfTrh;pyoU9>F> za>#9*_ccIIoWFLesw* z%jP3rm)cpUaua;RqvN8u^I1;`4fMC{o7=O6_fOu!kr-!m5`BNM#Z>EO@RRefFTl6> zLBz~Xq`|s7X zfaL)Z9%{iy)yk&p%NJ=q>4NrW{bq8Ym{1g!O8(1W!8!CpAN`)~3RY8Xx%u5>E0Sch zSuL4f;#mE#sZD>ANKX-9q0ADx?xVgfc&)p;U7St^K;1NKa0+n|ft-W846v$p;yaJ* zU{j6B1eiZsm#cdBXWKx*N+?)&hGQ^T@v`kB(DllTIU7ujFtDN~*d0&5N&{7~?9x^) z?Ofmk6b1AuApf=f)JJd&^JvWHoY*Ga@QZKpg*uWtsMUWg1wk~f7^lPtqt`>&W`dq{ z4w1@}>qv#G|FPh4LZ<1`LLyCgT+0R+2RPMEZ|BC`k=@_ zuQ+!TvJdGxov7A!2TOX;3t~hZ+jJ8V3(3I{JsO2Zv93z@jx9rDm2OeXwe1Bq>T`3= zD}Fi{R3%ZirpzKw=C*?2a8jgg%jyf}WK+lSA>GOJfe+nQUUM1u<p_0HGstss?`U! z^+163q4%`l5&TM z6l~DMtf#L6qM5RA72&H|jFYE2(HTk;hI=j;Y-aOOU#)nM0_^d9dVCDsV8BCcn^}LS z^|g&9*I>hkl@THjS*Oipmg-!ruHj3c`%0drBm;O(U3oF%im3L~Sx1enF0@m6yApRF zU%y90<;(+66taXyFGJJUF0LTNE8F6|y8&|rE|OGvS5RhfOE{lyq8`UIFdu4pU&=zg zk%nZOJ$^O4$K>e6pDRk0d9Z(sP@#X8n8ts=AilZ1uChdmHQ$TL)Ed$hW$|mv>W<&M zNWcC!AbBK%a>&XnyS6k%wJpJ+sdWt^|67@ElsFWuIRO#v@q0qKQ`tFI=o~+(l*o+_ z!VTghEV=MWWk(CP4HjNYOP2+Zdc>&=A^qPF@l(5|0Yaduc`w*Egz@Sl^Lxu+k zX%;Q~mg+MC*j(PZN%lF0VP-A%`ZSE^n?c(|FJLzN%c@GE^m<-w9WYY}6oXB^!~jN? 
ztMbUtZ%1-rJg-g`{GN|+9!^!ZolX`P?Zc~_?%uXX)Ji zI17fyR8I8%9QciN`=J`HY%^JZrGRynuD=71hhN5k#4%1a5crI}X6PEOrtUx)q`7>G zk#&8e1#OkiW+ZPb6|$w7Nh3$UO>9dop^L1Nno7!#F5Nt0UF}PE8{5J^3$$cHpb_h1gFNO9kaM-2|a}&5+8~e;YnP^})$8^stDQM|=D)0^g=bW>6|WQ4JtN$00lA$G=Z2;U-$fRa8E3 zSna5Bs#BQia9P6GOXWCptzCm(rJV4Q<|mf(M4nh2p6v;!9%d-l>=W0f`XO!>xsHz2jC2h6t3up~{2FiMi zSu~%qzgZ%9*Y{&F5c6lwpF7O%FcR~haetGBs&sc>K8piuuH%1bemf^m} zx>$J-4JIkr&5`Z9c{t-%H|KY9YobTYZSsy{-VN^Y4`a^{XA05JlU;2VkloK7XBFKc zJllj8Cnnn5+!Y^xERNmU6Tu|>y!NL7#^-!*@0_NZz%m~VW?LXlpXcVgqn%%0mds$( zH?CH>6EVMS7SfR5cQ>}cQ0E7OcEtcvkIVK_0B=ve_CtX&59|(WGKIhojvKP>AW2S9 za^j$X&Wv+^m!KyOyzmobH!cecao}C9wTL-5#GRr!drBqNPw5}GQ6>V9upzkz~$`f-Gi1^m-6F)OOcy)?gSy_m4f20{EKf!_&0k0V%sc( zBEBh#b{Lxh8s`5QedK`Q3=0G7|6YNA$A1?9LuSM`oiBITPDF83|BTN-?XA#URR2uq zP2p;P_Rrxgi!Zp!8$N08In^kg3X%T6UpWVwW?!(eCvY7x1(SCrBiESpT#eL@%4y6< zupw$-G_U@OZT#Fm$|lz0!@_*RP)eSuF_{t$E+% zx6wPrrYHO!=#~UwKdv2c%cthu?L$S0+pkFwCs-@!B_VGj;#xB~c?M02B*4;9&&Y#+ z^1!k8Ro*L!MsII}$_yzdi`IF%Cz;e(zD+%cE@y=r_1pg3fBa3C3c?rU<*xo{VY7K5l$d%6@v))c}YwlBD)A4#oexe zao37;OoU#pVl@I}T~|==n2&7N5O}(O*L%{EeI18-m&O9#cG5BQ%EY?DT8lTCW&P&@+}b}OaSE%C~0 zcnr(_63N7LLw|4%Fyuv_CWCKojeniLMv9ygoTq*>D!(}fGC`R)m2n!5L1deMgmD;~ z3tatX{`MO_*eYg0a9Ep7pDjZP1V3whPL2uf^*lWg^3RjUB#|eNDSU@z7-UNhQS3^n zou#3b2pVvi$T&b#673+G)7G?w;JBz8!)^!LhN2f6xs8(ll}~Lm!H@HYC4Yj+*ql&Z zEDmWNJqyA^oe;>y2uG-YstC1zev(uWXD67TbgEEli;zsL#xUN>?Aofl>#9V=D!9{X zl-8=qz{Di##H`}Pr0`_ubj-bMiHBb0i9hmNMaac7Yc)x8oC0})0#GulmeZ9l;C;B9fx`$ z5F!x%S}*m_m!u%>ofr#Te7OAp4V=Pg{AzFfsD_?_GalDBo>$wSAl-Rd-+5XpZUnmL zYiPSQ%VTauS!A%DgE4~D4s<=#=TM2|t*KS{b&yTlA39MptbiYOY3Q7P!Rz`CAg;9; zq(Rsx1IkC?w03bouR+Cs04Z6mp3B7b9c&T*6;;s0I;3Hm3EasRo%PU6JypN-uA;8&UGtM0HP zM8(t0Rn`G0w9w+9;+>)vh>D6@;HRG`bpHY5JMr=<-2_7b)I{VRI*jOn@KcMr?KN^IJz@#rxaCUd9%9uJrE zPV4q^7lY|5U60;}U^6~0I_WBzY%6~c-h9V_fobHE1a$n~%Eepe zUe~j-lbZLiCtl8fgKT%>!;9TF;?(aAdy}#a2md4ntHW>uPmy0<{c|T$@;!xFLWHmb_K`n6!od%005@r-;iww_TrzQ1RG=W%EflBe-tbN13fhhI*k zm(Q1S*SlMBw>m%Ur@1R83uD$@9z(Kmg+1+Nb_Bc+Zx07*Xo+=X$z-j;}`CeQn7!oNw?-t(V})?yW!f zuAVtqud>)}V_0^2KJMR>_xN!QN|dg|@r^nof8tqqi(XYp5n8+-6Had#%q8u?gLv}P zquJeA1UtE~)A{OP&(d4MR*ON-Ht$GP+x$9^X^U9AaJ#2&l_ohCp{0hbs5X|BeJl?1| zd~b_g$L!uluD^Lk7KYjKD%NgKutZ2xY(;-Ix$KAYlUr;Jf6fD6v@fpOx`VRSZFiGy zbj??T76`S1+qLaEG?6}jiI%Xx!;UVL0 za4sM3_&qaM!W~C6o%rrJ+`0SVb$uENMK`R>f|eKG7Dts)x}D+M%`_Cm14cO2h*b`*t+!H^`d3B$M>^pv3q(=?_M$Mov^D8>^w?(^(nu9 z*HgQ7zj6{R=-Kc4-Nnf|?iW#hsC(sYu-x@mbwsANaoV@5<0J^qZWxLd91fN|Ebwyv zsOQJmdzK}fnmhZ@3hjux&Xm8Z=qz97wXpX)_kgx34DNT>TgQpW54E|c$L?}Bn%W7w zsIqN@=8~G>{JL(t7ges6Jk;g}Ust|=bn4S&|K{`^EQ)P*a<_BB-`RDvp7G1$?xiQbOA zcKyxOE;<{ELO%$$qvw5Q4=2~+4f7gWhKru-k*RL!BHL5TU#>FJgKKAh+dbf$u^2_XMPj3$Q_lNs)o}cH5ie}T-(gcq!ufuCn<7Kwlo-VAcuZIQi&Vo}f z*(@%>$jZaXJRVg~<>GGW5#!{n?h(%3i_PJ1>)Wl>o#m@=_3SPCu5);QEvUP_ukKgW zHr?9l*{|w+or)O6y-~dFK1TIUvHhuV9d4(qtvW2iPTF}7`r+aglI~4rkA&;3st+}ku$@Ik=;YqKf8n?I*hBF%JsHjk++d7FGVZ;-PE;=^+0dH1TmOFV z`-V})X2E*vN$!uA?tW{3!i$1*||F*~Ve z;HX~H#Ri_kvb!z1L+?3gou{!E-ICjSzC9f9;R&N#dP4|byKc;XmXJ?!356<9{PuU* zg>E>d#64eNVF}K*!S#AtWI?{LM$7y?IXa#p&zs^s_l$nN9MF1tpL(R+2vKr1Q1KNj zeDxmPiHC0fRguE$+Gt!Xx}BRp4XtPS-fmv>LHT9l9%c`^yL4~K+u6(No{wkd#P*W1 zH(}lVbac(g_U0{ropp!qsW2H1 zBpQ$XVmE1R9e!HGi}s=2?+$1yoXd51510M&5h3db5cBj>$B)-d$ulv#C$}L3? 
zbA&B(mE<6JL4xQ(vo!R*b!UNLay3jl2C;ETqB1q=>37V0*XADKQ5A zV1~tI+5Dc=}s^GVCIV#9zCD^V=4EMfO0u>9kX-28)-itpK~Ym-_VQJ z-AT0Qp!M>9F-+QR|9w3yI-)0cW^Xa>pWwA!a9F)jzg$fo@u#0Y&jDQnh};u8FY^be zdAQ8hKAm6MPAn`Sy$mhcr++#lY1*QQ&*&W6VTz_~1))vfiIiyOqb^na0M(Y+N)8fV zVSYWw3zV&yT`&D}VtY%uC2^iC24|PYeBwUmDh~U9)&woeJezr9ot579WqE@!C;ZJ@ z<3Nq7QL9&UMU-?0 z{%WwFNPjMEyF9q#mksTEZ*rKaDjmev_i^h6yR36wS;C<)9)~YkwqY^6pD#Vb2>St) z8+*im?&wumUesGX7b7$5quzUK@3;5X%Ult6d|BZmsxQ0kq(_dhkINf1^Llw!r@lBB zy<_hhbuM9OIh{ktyR@IJ;oa}|AAU_{DE6I|S+=Uj;ZcUu&2cz<-)nKCm-X8|^pDkI zn9qjS`2Di>Rry|--Q}K7NM-QZ3DZFtY*%}K&!fvj_c`B`gEWZHbd^j7_ZNRnOTK#A zmus{!r&&jKTVXO^cE=AnU#zyzsI?ips_M4a-4&k`W1Wo+S-UrW^jbm3TE4GClWv`? zEC$}7e?!^W6zc=XXffyW;CUx$U#izijwUd>?b^d2F}xeGANNl`TdHV*KmiEPX+K(j z&U1M2dQNM&&-@bID4a3|J;7Te--Pz+okXoTNd+*9eVzt|GVywn2QE(o(;T7r7EP>K z8WhHWtZ&A~o2Q0Ez24j{b4d1NcRD@Zy>9osZ-X)i{V5!e$NTw-yxHuzH2BCI^oMFX zz2EaPdp@7f&A>J{o=q0#9`oI3u=UP=WO3;gt`+n*kL7f6?uq!|oqOxYb9P>K2Sfb| z!^=w+QL^8+*md0R_lL!DwYpr!p${XQ(nhC!ns4)Q?rm?=^^;}%?hab=HVMk&cGLfC z@_OBLkGH|@729dPwcXqL+)p>l#Ut)MF2fcn`m=X7cy6EO{&E`b=l6a+5?L941SgD4 z&Qv3qb#B$6JKQ|><7ao>zR)6DO&7OnQ+9{sn)SB3=X+DO+E4^@C%mdb(i-Ax@LFvj z^&n}rQBfb}t9vyVwT9$6Xjy8S9A53=2%7=*qtL2wlwUXgX+2xzmLDum)*|ZcM8}&h zjom8jjKSc?xbi~YJ5LkSI%a%-NEu1f_rp9-r@O6>w?(=lmkcKXb~>Y(zG&XiUss32 zYtfyt)1@^V)JvmXRdG7r9|s3-cQMBIcNm=SH!<&AD|1l$>GDPfSv4T&xKN=gudT}< z+1;P3w0-IH!o=yAopZ>RL%Uc$W|@ll(q08!jE;u=wlZZJlZTZWvjMSxWW~f9ch+Us zrQ_*xj;@zAnsr~6>EQN!H{QQGgYI1gn~mHW@8RLPzdvU8(c!*cT?U8VWqlcLHl>Uo zv$RtFysh?+bk-erI>lq=?drBOYTL$TE_=HW6~=OIALXOt*+;)f_OLiadob2~Ii<(F zIguhRXLGSByHhxh1{0}&V(Ic~lTX#@JoPfg+KaVe6Plv@?mge^Vo!ZDhNE9raq0CR z_gnkY-#<|My^yL<*5th{pBCO0)SeP~Fl>S{{rA~$a>3R8*Zs+Gy;3FOb^DtLRfFSl^lZD2 z&3d<;&d`%bgPu)ChfBG6rtB~>i*gUt_s&!6xbPT z-aCKp(PPrJsyLu&qK|p=G`?7ny!kOQ+}fnB;`;e3D@~TJVn_CQy*H}kj)(aSRDxe> zuskU?MeAsDHohajRzg$zML5k#I!UlX7R#UpI`gaSs0;+Ebclb5eJm4%eC~pv$QIFhi6{X-Dx8|`K~Wo3 zKtM7=RN{c(g47&`lFx{6TK)NuTWG|QD(_Ukrp%`Qsj2|sOSz0hNUR6x+InICRvAkks7``enh{tp358fp(j{tSBiV6%l20@AC z7dEJ9iWKEiaSN7)g9O7!>>eN@0)AY$Ld{*J5HiNLT;&`IW=$|hpr{MdvRnk0kQh@W z1e?|dnJ?z8mNRzjh{qrbd{Yb|U_#XVH8IKsQo;}3+_lrlp3~6`0Y#vc{tSN@sl zy5i*V$Y4DNbb*zA+?clS&caFypMaQxDg>5v4n9mM6a?$bS^C9tdfd5Kvi-q%KxCH@ zsOY}v&?|`M^M;;(Y<1ohCz2zKJI7@hBa)}-S^tNyc<(q02;gmdC?+o`^>7+3z*2zk zyJV4fh@(b?XJtqUuf2ENZE}A)jU62ytZs7m8zVZ0xsH>%$=hSx@km@ZG35&zsW@wn zZGL!$RILBj^FFs17D|>@`iUDa6g@AC?9bXCmdelBd<7|1(j|gB)AN~yxfN{W$X;{; zm&gcFi1n_v6Y<%UNMV7X76Q`+DZC{S1@q-{no|!GW`Rf!mog?uGK_yArM63i75 zBgX-2(Jpg@NpZy$bt&Gh>>69kj6cq2HzATC+m8eRIYAJuqZ3~vAzUQO`_fN*?7Q~} z_pZpdgt1R2AapGFi*CGLb7eZ)tiBD5fUvrOwLAFT zVm~Wx;7VZ=kPb>szjl!oxDAKF`VS5^5ONmfz5z9_;jlc1r~rV?QUD+ zbpn_HGvTE04q$%_#I4skYfZ9Mzn;q2`qb07shcc-3p>)zUzmJM43ESkgk!@C;EL`8pD;_b_9Q8@ERW5j}ZY8a}db{1rU7J znMouH2ZWk}Vg<;Ad~T}3r(IV7{xe00=`Ltq(BHL#b6Q;Y)TcC zp5ca}}`qll^!gWw)kk^b$#X<=CqQLJ(cXMKW&z^Lk*JGFM40G3z;+p`qzl+YPt zGmO*70V$j*QmKm=gM?#HCZY3T)qa7{w?W2BET{)UxmG3>Qf1hF>>`5b93*q;YdCOl z>>;7aa1&RKj;mOqF)#r~37&wPovy~MWEl-C5cPkgjxsM4I^RFITg5A`@kQgxpO|uF zx)yi6w=le;qVDkrQ`nhlzD1HQ`#gVPLBr+FpZfd@O5jBM&VmPN{ujD*mdFVx>FZMCzS>WCDy#s7U~||= zGzRDZzNC-M*_R80QAF4sLiW$P-EjHem=P~1N|yZ1G;Zj>>-E;LplkjBq4>>pJYb%o zs#)t#Onq>ti5c(BYmEueVAN0jk5|@PTwFG{r+&ncy~5~mAW|SmVMs(d!!kd(A>5{Qe83|2tjwH&V5`Jj&&0{S z5w%_|ud&Z@{A91eY#}3rs@8sG(FuwqdMa~8d>$jCK%kgsWpb+HkBDGyfwbVt5)1`J z{wav6+(HP6c_10n{|InCTsc5@$S8kX=&gB4GWo;^pV8wO3CUdimK(+v8&-EW@tIDs z&*trWd$JahA0_?AS?`vXix~EX<_~-PGR+xc7D|~VC`>G$*%m72x{srpRVa$@0%LF` z+$BsNgJ69u_z(oICl%LusB=lLi~Z@xqW4eUeskOS^1PL$>yGAXmBJ_$i%);=)!@Za zb2wJBpLMPI@xwv9qt4wZLTMB=ar9;@{!?3i@x@>DYXrx51X*l&Bs#lU@~fY){-wr< 
zvlqlZC5ho>(LeFSaF5Za{*X9DIctl1qLkz=#QKYXb9Z@6|LaMO7&qqk`(5`<3CZfa_r#vC}ceU#+w@s~CKaivdNqh14{SOZzS}2fZ zh@8sYGd9IC(U=fxK7)k85j6Obdbt<6H_O1P(Dj0o29E^A!uVoL0Ed;an#=q|q(0_< z1BWU_h~%REa#|w<`nLXI;7ep_Kjr8CfATnBKsxBGzOFlwLT`Yqvp#>t1qk-fc*{`6 zW=JG9^Pp%DCv4NUs{!jtW;4Qo8P`u z$C8dI!$Tud#Bf4j%#fcGqM`|cgdpG_?x_SpN=%R#5lb)+t4VsO5@ zewn0{>hF#qckN0nk+^iccTAcdS{TR{$$&I9devbmUD}B1-f#WK8em*;tNj|b+7Lgc z(TP!C%rJ_#?$Lh?uuP^*U9FL%3QOsL)DxEiffSU`w`xBqB~|1?RoNIC9)>B*$Ct{q zEEsJpVdy@@Wtx8>E!cBc=fNKE4BQdiRKvfr!hMR=$=kMQ_?N+(o+{CJr!n}a|NXbQ z@ZnIRI|p(8Cr*0*x{eF$owPvnD2?lX9+O50co%5N3d=-CVt))O}zExh4)&(S+ zTGb-34#R&Ngav}KB(!2asF)IUD3OSKWS`z2to`5RbDwxas^B^EFE&Yb;ET-^B22;f z^f4mwCu#pQw-(EXG-enY5lI##R9#Cb^8@h zETMnvH055P`j)VZf92)Owq%Z|paD{^@ipRzr>v2D8h$p2@LixYI1-M4!-j_!El*yq z-E9^j&BN)t;ovJb)fk#+JpGlwO&!%-JZ&_ZPi>XIxz${X&l;xG2gN@8Tk*4nzQ_OF z=d}=NcvzSEpf#SX!5yGqZl7G(XMLA6O}c-cYSz{V{r;7BbA3!o16>y&NX|{aUyZQY zXmE}lziEYFZLEAnkti9hg`;^pC4BbbTHatEXiQ)PDL#QTmI&POK~f8g(r!U>?1q4w zg(&~WaSRhIeWYUa5G6*ZfUVzlSJw|_O zQhsW*)0ztBx^n~trSmjqI2AJ-zC`;qdV%=GyKJO{1xo8hY)xgXUx&0G+g1mtL!_(! z&5GB4m3k``Z!k#rmJksgYmnvvl5_#SKu9r)WHN0_Zh<;3kvf(bq(_{v)r*VR5iEkk zFl#@VO_4x0;tmM?elfM%-2;}?ZghWMU|_x44=7_DR}-)p;OQa=LoVRR?%rE`wDy$x z&6@s|$FTHBq`Cj^na$t%O&U(n)CPuG9Y>A!u6f%ZapVft#0jnV+D4-U6m^`+SYhRs zi%R#^bPSR@cE$oEqz|3W#8OTD7dDFYx?p|IkzDOMO+Ak{tjecwcDXHt4aa}*wsZ0+ z+8-qKS!C;Z3s8hHBOpi9<~)meD;BJ`yO|p*3xU)x9_>+eXuan{4?&{&7_t>zz+$-^ zRAoSAsqvShX^i+a=jb)uw4a}mA8Yz%iC+pLz&XsM_QH1n=keIfOle8uCtu$@O@7Z$ z^qBYC@47+!GpA~5E&t$TQ~!VA!?Auper$9QJwvO1j?ulhe%|G54LCe*;ia;y%#!Vw zD-SeULJ-faqb$N8PiIIBEX&+tF-qIF9RscVqi1wNBMYR*U{iU>Z-t92eiEP<42gG9 zfk;?oFLv5H>2(sEC!_3S?B|LV!FaX}4HhC9rZ!FK^8u*yGl2k#Mg`s>gy{yu-7K2d7`{~0HG|J&ZN zZZIvJO3hnDql0_jITTAHy*?j~tFbHP=Z{YHqk}fDtu@r2nbSvq_~@ov7z9_WO{u}v zSk@(il*=!#*?cfeF{FRuJ#F-F<~uTWFub>4JzDEX&MeHeUWq}9t~X5FDJ~QNdf`4C z?UXk20w?1HiB3udjUCbSiuST|CDXddV6`w2YU^At+rchukO5T1BTSKL$STO7c{NR|k35eJWj^=-R2@JszZR0l}6j(Eq za)Sj@w|rUKFz^5-ZNLT06>9c0?gY#QRF}9@g&`H)DKa<;*&X@1pKE5je`-P(Hyh!v ze)OF!{gp!w)c^nF4g~S1j%h7BXzJS8t&^kv9I!KcJIyzBO_S7JSjomYFR-N)qsvF% z_v18P6$*E+Go62LEUc^m%;gNH-Xx%=MT%&ZmM*EC*42<06lHclh#buc4T!4rJ}G<8ttjN-jR%K%qvfrL9-$0gOdCQ70k`euI*pI+aa0ZXOzBnky!)yyd- z&+K%aOwt&Yj-u9QPQ}0d#FwWi6MxsO{?78hx&7rHdL4c==>MYk`PaGDj}E|}pmibK zU_ghGMi<;+P-iLpBpZeST%$gY) zZgd2Z^k+J)KXWuo_i=BfhF+a>@E>~ru!C?Si8On>n^Ozu&H_$s-ymoxDh_4AYonP% zYkh&_<^ZGJbmGf3Qq+!JQ)AME$jAbr=b;px*E@f6uSAFyL$tvYO8;wq{2Ct`T|Krj zlrzgu{FkCWYUX19`1|f;Ypos9;>OZ}lPb2Kpt9s$`d%%x6IF;g@prCbcKap{TJvZ> zbA;>bWUiWd>8{-tn93;tHDRGp9~f)qrH4y(TUv|xREf{LlxWSEHCXb|shgR!j=gW* zf9HP_KlKzS}NF3`1PFwcI!)rdUj-?^QNsZqnu%yV%e;}7FY1T>1&y885f#Q==EQ8)_xGJ2@~IWc}C?gUyF+E zU+YzKpZ>4tEGhNB+_#yt|L{0HL-%TqXuy37MK)1P0hNBNEJTWOC8vmBov@KK?eWi^p;IPxR?Se)Qqb%;=LVzx&xNzWm_+XTJRpPs1m7Bg+?yPY%Z` ztBx-o5jy`2?HN?PAJ-;;|tcJV^ehOY@}OKKyCFam~< z6mejv7(o;`xbc{{(pld!1lE7Y%fK_hyH%1#$G2nH3mkudTQsd`}@x~(Kb7K)aMERoiGGT~yOJ*2|B zt`VPLQF?J2*}BK3(nMGp6e$;69Q#N`-}Ntc_%O8EGZh^u~sbHAV*!@}zOY zQa5rGgBm&76NqJmF+|?v?6k%rrVxMwOK`d12&rTaUtL3UGfNiTuA?aUX32fOE#!I& zqFA>dht}u~uer3W2x1p2NgI5U0srv3 zDh@rYdxM4pEUhZ)&GrAm$iBViqR^N3(>ngb38{aB>T(ZK9gzCs#|tn~k@lNS{V17= zOuiHn6D*Snh{%HLZH=H<&pReZa@6xw>{}^yvFu+rQcSQ)wsZe_m{(cFq{gLUg4|#M z^0|aU%;)iwI-(0W#Kp=t3cnhqpSmI_5j>*v{s8OwVVck3TKZTri{Q8lRN>BWPAnv^ zNKt>6Dy>sSAr_QIs#g!nUlbUmc4bR+7)!HRlG$elw3_6lA_c(jY|j#ut=qkVw{~ao0om z@sEG`gFr*gANH%=b(IN6^9KmnpEJkbTK9j)N2F&KKi-3X^(wvN_greOpLD)h_1gz` z{P;~iwI+$Iihnf(%_U;1iKm)}RvOvgp7rB5Z}>;US3hFH++ZpcspM9-jt& zr_%k|Kj$0VA+MqAf}9dRIUzw`jw~#A4H$I290TK6dyMpa6d2ns6~W3{LnaPd=d(0B zzbLKqz4ZP7U!kDU`A*i<&!NWyaJ7HV_xJ1x8f*ZPH1+=n1HL@5@s=SK15(Lnwxo{@ 
z8yzn~mG-A4UHWDLJtjx3Q$v;1ZWwpHfJM#gy*b`O^fO-?cR~0VINA%|_tOe3At6L5 z8|%8PpMPMw*Fo(<5Lm32B)UGZTD62MwhlzvbLLN!UBBRZw#2e`BmAugY#4uNid4Dv zUz{Ov@IUh<=1>rLt#xWyx!f1?sKY7J=TO8Rw7WsW>Et8#Q;BjBBg+@M2NLa#ftaKn zQZrZ26^43l5Skb=YU31s>#Av?_5M9LT>JgmXO3iz_ACDtV{`R6-+C6*__Q>h{?zL) z?k4u{eHuw`4qW%Izp?a3iz9#RCtvhqzxhS_^>b;H-I+`+QM7ZBQ~hZBu? zDrZ>6)TXCB$a3j+c^xfsMrFx}imBaMJU~8L6Ra^?W7c(pVB53On1X*ayt!P2j%Ve)T15K~y^9irzW#8=&LrH4_NmG+SNz(DW3-a>?uIPk* z^3=x4u%MGaV7Y!_Kbn9L-j~bUumr>ui!Eu*56k{2RCH-;zThWlt(aqp(rp*i-k&1` zJ`WDQjHr;hE1qgxHMM`Ih(&Nzz#j3y@dRL=m1(1M)V%Zc`F2=ckf{_Mx6!`%`_of> zarDFOI$zw={?HEBvGQg;UPVogS}55=I>KD<$7nDzIeH;q{&@^a#fMAE*wKBU74pyh zO3V<}{()b<(>JfQKjd4JtE3B(i^HlExTZr4b4(D`{F*fSxt@Q!_4;EH#$JjU2gxZC zo);+V0ZPv7nnyV3uV%S$amp?jc!acXujXxm%@;x{#6kvfC+0|v=eF{8XW$lsSOlkd z>s>l6(0&CSTar6im_FgU=Q8KhYR#va!Lcg&L~$~ebc&)Y|M9yc?E^_i%cZaDXXG`W zouSTWeJsOMNhg1x#1SL&ir+a3bHzcK0y2|8ZCb7f)>?lhLi56E<0ySQ`pylkNuUG+ z#h;So%!n$+ z=5=nNY7~DHFsB3wsri%*a&5tAix4NPiZwowsn9-iN^wPPss@PFigKm&Y+7T4DUeuG zpW~N`5i$LAPRy-?qNj7cL`iqKa58gYSJit-`k^JM!|-jls^pf|e3q4Z{?5E0mFcp_ zXRP8|+g|wc)iFWwqQM$z&yi|yr$7*29lw+m;?#eG5k)9^c@Y;hGbTtzt{s#v(mFBa zY$eN^_L(d@#YAJ=1gstp68GbmEAYxH5E8LZ6=1;k_CUw^L&8$%9faEVSLRqXzCYum zyEfj1&%PjCGei?}7f&=A(a)?v{22p3vG}Pm$?7N8nEHbw{KT^_w5ROy@u+a^M>~K0 zO+SD83^jiJni)kZ;_EJiu2m2ak`len!t^W|qe)O85GNdo1f6iMyj&jVP~z!3YFm{V z<4E3-uY6XO_7rz^ND7b2f>U4EAZ#D!R)j!%uD2_a`izO(f|PEZw`A;{%q^*QaF;Wb zjV2ZzY22{ZO1dndOTEU^;>0mJv_?@8v4DS#BLu6(acmJ4V`2ygwM2wghkWWkW_03wM%%7P?OU~mWd2uo_S%|?W+IfbERMZ z2v@z6d`1E5IOi)6&FlZ*NrQh03`aj4_M=DOpLukHAN02ewyAk}E2V;2eKzl+BnZa= zhyt~7iLvHxRV1>5*&fnz6xp4femtM|gQ_;r_i;A>$JLS)t-#7dDk90?U}|P zHpxljz2@YKp#poW3DAlDocz!p!9$=@B41x>hJWWPNF}|w=DuB02NV@v|W#zGcu6t5o2Tw9r#fZ+kxe$iCaj`nF* z5Fv!NULE@u(%QIfu1?0E+QLNax@pl2CLkjWU7DSd65vH9#PVtyz}Nt zxdh+}bAdT%ePr|2di{5nt~qn?(K(Vc{Hlwi3Bu6~Ek_$01ilN3nJn;BGKpVi3n}3u zB`|D3&!dkN;^uw$yxoDU#1DzCdbZDgdoJ|8d9AU`;Sc<#$0$R;^pg?x^qxSy*ZW^< z#NxBZpM80r#`=GzC;aLz-yTu-fzA9>^FChx>@kP6`EM~iQ5ImI1vZKcEXql<*RT4k zR_tK;xPnSJR$e!$fNBp5dOsY`U<3zd076b3LT8|2PcIY?AjH||L{ZiEVYHelHj8A} zky33bs2L;)vIGmXu#cpXPnB{&e(Q$Vy&+>#Z^S}^otS^Y=1fT!r2bA~Mc!VE`|%7P zjWh9tX z?*4;WzxML|%sv0uyY;iyKQ;LmM?z{11R}z;c6U0h_1*}qiv2`Lv^gb(+hZ8xJjWtI zP%YR}#E5?hZV~NgR(f8=1R>=f%TY%lm|(P=uTogaj_Ok>CzR^TTNHbYOvjbl-~)4Aa|=vmDD~?Np%(`VtyyY*wbs{m0Lk}GB}lVxH;r%6 zQv2&*XB*=SR_=@&Z0iJY^3FSdlIj^4)z?pdjigDnFA8MX>Q`Eujx3BvZxGXESBM!A)OKnbOjB4QB|V&qiS}hW z8FiaI5{hfxO7||f;We-J9M%PHDbQCzK^A;{dEY(gZf)xQ0AF6-;Ki@+OM_oE)%e5P z)$C!1bvAh)2a4iN zq3B*G)?WDRiB%q;EYF;PS$dx~O8LdpGnK@&2?pQGN1I&;_bQP8#aH) zQs8DU=5fh@itW-xp3Ol@lhCOBK#46G2-bl#WuGnZnY0-9^#tKPMmlerdM~ z?8_+vlIseq_fanOwN{#$_wPO0f9>)9a_-(AbGu*H_h5C?tDn6fA^08(>XzZWDsDkg ziM2Ekh|W2}MmM6PP-84eu~=BA?3{n)H}LUvjn^&@qaoHhMWDTo`4aD0h=BR5c=it$u`P5TVn2=tY*X`?eX zb?}!T5PkQKHuv>)K>qaZWWPC_UZ=N6Tq?{JlBH$UxdX~Qgq3XghfT)^t?^9!#KMYy zCyw9@1&@fxegqpjLz3B8G8t-a&=bE$DO(X%I{cA);SNa7OWHhxGM%l(?Nt{H$r+h2k#cjTS3*@XlcD+kEfmQSnC=2_>0hPRaf5|SIjDD6ABJ#Ha z3PylA^e}!WBA>WWuEVoEXs^X1)$5`;>TO?P9F&#+!?`sAR#LFKpsEpQ^c#dT~Kc+Nb3{)*eL_#V+& zuVs&?u;_9P)TZdyL>g&a?JW-B7u|<_h_9~<&c%X}_Jcf{en#3ac5Y8pup^``1A%vS zHW+S4E1H(C>|(uUaCjk?_7yQ(aG9Mz`bSvmeYcM@;N<$5VC^wbHGUp>6^y3skgIhv zQ6-P;e2?qCxLOqQ+xjMd89a|kBVefSv#L8+e&ro|{r77>hbf$V&4<=;>uv7w4qNnl zbr{=`&C~b;r2o!I`Cs30J6ai^dH;@aXbE=whj%2+d<`HST1ZCuHCGhMQ7+1TjP*HS zE^dopArqNIvb%*)_5Z;U9n(h%K0=dJoZZ(Xgum~ni6mtT7svGYFO-L0^l zsub>}_285LnydQco}(|&$7sK<#rH= z^20sD!jaJdzRi~;2a6~{{Mk$4B#US9Aa3)p-|uH(K0AbeIVcadBLm}D5I^%UGz(+0 zAMd7nHup%2{iMC|QraJi8$eA2^8+NsqQAR-?~{skwcH`USazI=ThD&>YfcVa_Sz0z zuc_-9T-bi4ICaG^bd%d$S#Ly{Q>z?Us5&6bto=0F5s^4a_t}M`@^-L)dZTqTpj4g!vEbP- 
[base85-encoded GIT binary patch payload omitted]
z*)}^>a##6Hn&e>EIJ$fZ{zN?OEo^iA=lD>tWNY#pNAt6%piL-Xlukbc3m9_IZNs<- zX5?=G@zbuNl(*Lbi-CFD>Aj;r9&qPFZ~%CNe_4EtCK8Z=oZx?D>dT~pmeIM3ncd)# z|Kg^Nz_+qZ!nvfcm@Vf~e&9RsEumub!;=ZH-VmCCu7-Pvnk)(?@0(>?Ci^Rz z<7L?_Mv7C-PBj5B+`B}o{Xu1#D4j|ae-IH!b{lkGat*zz=I-D8#Lk4yH}DzQo3xra zFYCgtwavAB=LDNOA_@{l9)<1G;=*`-&r&Bl5xGb=^u22BRzU5uF>&NLu+d}(ahV8P zqGmrjz!#l--DX$54ChYS^I>uM%0Zi0S1(8>T_4|*tu?os3$L%IfBhV~;!p~@pcio|4)7-$umF#%%|*}AdzU=|VI*)j zzO*MnEduHd7<(BUadiPVnk8~nh`pB7pOadr{gvF^?xkaQgiS<(mETClZGJ5W?Y}(bfBC(>3h_xb z$8#PT#p9tT&ZCrySx?AJvvSi?dV}=>mJR^nsST--R+MZFG*uY<3?RA<51<>VD}{7V zFcc&j9-NA_dxkAgZPV_PYQw(9>Jy9{uiYlB5pxeeiF5}94JXqjf*+ENhMW{gnc&mc z>IiF4A$$X=U;OI4cd>7fe~KxAkYV1HBGYeh)Sa#T#pgq)?xT{ihyIBQQ8i~R7w%aws`lTLd2DFe)Y#DN764D0diZ(R|m~Cok9wRbP9}% zbV4zQm5UXUpp5USD6+h1!NXOdhdWGYH9pwZ#Yu?$bxX@p)T)n9fBNk`FJQ)OLxI$W z+er_7@`Rm{c%B^3lu9-_e`mF1&ZbZD>dq?rSY<1XYN<_r;VvOWCbm{6**XoU$|5<; z0mHB5P(rE-{PI$xBExtG;n~zJH_ZdeqHYGM}F)i=!yqHiD4^KZU7q`wC zVt18|HNB)*j6QCHSI*|Z)a4yc*Vpj#e5SP5o9|HV9~`$)e_6$Ur25Pt%3{!N#5|6M zzY1U#z+CXRSV(UQ6O`t|o49C97Jq$9p$&D_-2AI^rcpGhjzhk|SmD<{9(XP3nr zsD6Ck2%ezABwpn8qKWRhtkN(Cg|sYVQ74i(#6VT!mOf%Zq(3iJYus;F+IiGlVNp}) z?6~SynbA#Df1&l_WUoq&`k~9|Ss=L+ua={5TZJ|qUvS4D8b2YBg0eKGI*ldJ5LGE1 zFsG>bgwy<`xi*&vf@S9Ps(sa`jkbgR;Oy7nY;4eK1zAT~C)1aQ$F{gYNfj-(NI=ts z`MX3tyd*zbWVJ|ij;b|N14)w`p3&3t=9tvX+Uda}e}cIpj^Q#&=@wiyS$EvR|N5UY zaOD57f&Z=k-#J?6|Kw|1jX$LfXrD2aCT&G+h(F3zv&@-)5;Vo4S8Ya;;k=TP&{C$v-6F)p ze^&K+sF%Y2(nW_vHSl~DzVjp`qmx9^0+^tbj?R3ud6mKS%F7#%e~SK$cP+!2J=y$p z)q4r^T6|9MwfYlM71RhI#+(Za>n%J2NWE){QM|Qeg2Q49#02XGE=*znK*Vo-=4T$L32ne=`BPQP!&Lv-TlAjWdheANC!8G8Kz5`R~r<0Rh&p51ji0cw+ zkD_IDAAs?wkFaO>IetY5cipLiV22$wJ-BvBrc5$_eM$k8Y+(b+CC*C4f5aHZ<&Oz( z=3>>ZF7$%ts%XpVvPk`?Uy}YNsI}BPbO5xGIASituMhRC=h6_CmUoM7#w_=|lHG=- zmPONn*FX;XYG`mF6;jj?Rz&s(&sx@cQw=!b#l{%%V`!g`cTO+i(!lv$|i z#~!z+8jgmzYu(Q&D@3mTf8?*`ReY;cD;ILgn;=EPc6#y`+V05z9xn^69B0{)Q#lIZ zV=Fxr9}Q#2kSdUK4|N=m$hnkar0x=^!|$ z_OWLm{LwWSe?^1q$%Wv;AX>!vjCz0w&;z(M6p>!Q`}WN~Elajmf4wog?XaQ(3GkWt zzW`uj@j?QEp0}_a06l&hfd(luF+@-?(L*zpo!$IA^*QHaVDOsl*Xn;W3xxde(LiO( z0z(Xbe$cgPP>x86Aff?@eRajx)6@HivmXTISS=!^MU*Z(B5%IQW8|_$WLtB zRme8=;0U(j(2*?XzsIxw#2x5~)xxxbTr zJ;j(8Bjg|m&ryU`>qfn!;k2Rv3g~W9hkQBQ=)(sZ1gT!0n&f9l_gi)1y9$lMH zyebMCN-Dzye?E0Vx-t?aJ78S^no6MvHFyudjK80#I~~xaFaLgX1w51c6x&+g9>$kh z(X|Bw)W-h4JLZ&G>Zfbh-r;JCdu5)RRzLZAl=2Y1;C|E}Ocr}Cse!)k{vHH(W3S=zZlB#c zV9#dW@-_Kgi=wH51AU~qImP~=&GRxjxTsyOGw|t`Uc0(w1_Ke*Mj*;@9mysAh^wf3lMLYs; zO*MTv2H2tGGX1r4+lPr3C=N4Pe-6PO5X?24GnJIz6RZlrkI5A@Ske1?Nhejlb#0_c zy8*h@lvY=GivNnt%9juk1;DGK8{$o>44(rEYzTE`MRcNMvt6F^{Ec{n#JnuF2^q|` zO&FVChS5xecBwFzZEHZsN4rkIe-$BK<~P^G zlG|X847pDoP#18>u|+C*uy3)=apK0{E*m7+>@-YlGJ|Cx{waQ?vc3t1{_OHd}p%( zPK+B>OL}vFh6a>MELf;sPA6EBpEz(&wQBJ5lY2>Jk-h&V0uS^7r|oi#``pVMC=bVH zUkoWQfV!ZS2v3IqkV`P1nP3G1MIHrjVxtQLfTCONfH&&p)XkA6f9AI<(NUlI$d0Mx z3HOtDYt=*`+1R{whOcXs{2mL9z7;kbX|~iGIY(MdDn12NyJUi??2GKeIIxD`+U~nN z=&fkLHO99-WmSi0hY0~_GkEpWspD){2gNHBK<+llJb!#6^~c<}mfc%%w^;cYsH(HE zLtrg7_yg%_#`Ouqf0wI}GDE)Zadnw1k{z8f$&F(7ULs=$*#s`hSDdO0&kPwPDqkGD zPCbVN@Tx$YjRH7GKltezbRNJ4KMvRqd&ml9l+ zthdRAgO-YVkyN|yT9<1T z2n&GQ%6lpbl}ZHu8vppAu?1Od@B?PLYU8OX;zJ@f^hnW%`kwJZkSs4Eu?B6{{4m862|iYz zw#^O7<=J)91-dtMsTE$)@fQj06zC+`77!ECD_DYrf2Qya51fFI{<0d<<=7Ok+Q__+ zq0VHF=*`0NCr`=_KZSpJ+`-$df}Q;mJpZQZ5|r{e(dsW_gfKW}MH8eD_Vea;n_3)% zMK^r52z+C_-=r2HeY3))q|Y{4m&z|8|0zha*O>b^I4lNM?>Kqj~$iE39(~LI9W{#NWJr4fa%UaC=OOF$( zPy57t_r#cHIi+Ail2*l&ek)Nne_4;j_|PCnM2rBRfE?L-Zbl}Xq04u@t$d&;S?s@h zqv5=j-9Z<{XB&m)rtZD-5htPXvwB{Xt$><6ctD2dB+CuxJ?Wc5i~y};i!w+^S~vD< zlG*YMm~K@cB*i*ba8Ed2R>nIE%8rvu1cQO%Uu~uHlTbrXOPE{wUF5A~e;elJ{(BDj 
z_~A1JW2@+HgSM*#)jakzToGW+RcNlK>~*lLWgQV+H7`HuX$C0#O0i!EEdN zrchKLNEI$Ah^=Xw7>jt>X^sfuUUs!-dtgSEkmHD%ZI zdi~n2FUc>QZO3+ON{+dLe}zn_@}lPP>quv&(t{0!XW38-PBH##?$8UD5zZu#dfW3S zCKe31d!}_w6dwn~f22j;i-V8uQ2Bj`)Wz>qD)E>>MYy)LlSy?sM;G2D0xePA8m?M$ z=s0u9--y%KbcBEfm^xe9d=QBgLen^{P&uBFH?E8hcAq19j^C=-e=VL+Q`#5viV%Hb z9$~4^d(N`^M88O(_YrhZvu7IwzRx`fN+_{@~&d8cJJR zp^s9k*fRBK_TP}3v~F1Pz^{xlQYK)ER=jI3{5rTs{{Tzj(Dk#YLS<1)?i~_pkbB($F`UECxWH1M&t8Yxn_>u&7U(m zY`2t-u=dlMn_-mBBH`)s&MbmQ}3^_AR;q2}y z!QxN6x9YnNO?KTO7@vvR(Hx)LsJgXwd0>C0uzNs+Z^Q<;*w*=qH5{)OLhi2Quvh|< zba~Y@G|DFJ2M)Y=_?zraF*@M1G{ZJ|`rR*~WBlOfMz9VX|J^X{if0?(uo!oCOPk)A zbQyA>JmM&7f3v3I%1ym@X>fVx8;sA;nK=y9rW=anRs@`;6&~ah$?=W`az_PX36SXC zhp6mp#harNgo*}L4IUt!zYhqA4U)BvQ%K(!LJ$W^eZzd+<~N2Ie%TbvRm!3C1+J7@ zaZQLq<1rclK|sF0CjPx3PZN<1zP%*`D#sbsT>>n83GnFm8-ME@)cOHQ_LR+opc*Kh zorERV04d6GLdx@s%sZSSrIe8*20%O$yK0;+t)TugO@Id>xu(38B9Vy)<;KfsMWy(8 zG~yQ2ta&4O-1kYm#S5<5+v9HmKhp|bqoH&OtTTR=7pqTS;$eK#+k_ovWuDIKy=?WclYgYRh18g9qAxOoi_O^sg7tSUlOEG&$x4a#16!+jxPgzhfs&6z2M)Jv z#9GtAbiXxhiKBOnw~ns<6uFD$Sh3KOUQDFWPp5|5^{u1iH0Y#1;AWK*KV;zdv55fQ z(1QSlq3@=RWE1Yw8%wJ@f1r0(1RS--q}!o6`m>shAAdR*2S}x9=MdC|PM(k>`r9=~WeUsPfs~H6{A9+$FX5PPD$Z{KvoXS>A60r~wga0A*4lg? z-zA}dQUW}&H=R3EnIg5rt8okL^|$02 zJ}#eEI2;jk4(0S7=E!f>Yyg0QVxg25^Un{UZwvq|r0(X-kz)dwjN%aumB&j~c0qHk zlXv&tD8|#`H@tov^wnG5qWeaao{fu%gSBg0?SEC+W!I^Lw>?c6312(D9}>DOYv~(6 zEaK-mi~#^$y`&^l8OJ{>HVaTjRR2iz=|0}%LV0wQz9e`q_gw9+88vk91{jZ<$1=)% zm(aMTv&b|E@$+LO70`e`@ps1Ccg8zHITx%UC0^6AP#r!k&`yu!=VyM}3I$}7Khcn? zsDFYe;>OFDwOEtWaigr56>TwcYeZiaTsDu`tUc^aWMmgFq}mfrtb{tDi0TlsQ%61C zrPKWR5=2cog(;Ukb=31zTtO2VCc_L8Gg`etscMjN{!BL?A zVKB|17yMn_PpgtxAW|;I55XZq-7n|dm#d@ z2eFs)zL1=`aT!#63vwDVe(ne;`AI@lZ{IaIba;~7j$}@hlpy>xtsyHZT>E2(`Xd8W zclD=(m`O34`x^TB)|QcBncL|~EmGL4O6+1K)el1Ei$i^pZqdS+?Q8nA$9)Am! z#tj6c^<_2XvoVNMNig3b6*dk+p&xb2dT5V7fjjm! zakc6`7f5I~#TBKeXIynW0#>4jzI}N-n$9|QaJL^KM zl3-D?QwH1NG>D6}VGI-_z$>Mrn16K8i;-ts^!bP-E30h^oqvH2h0Q9D$})rKX7iE zFGlal&>M_WYK10gmwlH{x&i;%EVBlQYeK~KF{5zojVk<}&qtZ0Zb?i=pRJmtYRruF z7MtP~pg-|qM;kx2K;#(Ye!z%&(clcWbYnk)tannE+5(&=JjtVA0)Ik%Z0sX24bc<| z3Y7_&uT(6ei^F0d6rv^VD?fe>#H?`Opha|;Lv z+Bg^&h)i>jA#q|-<4$RG$YZ%t%tkIco7jg)!D6;;mSLy-`yWluRFtu?5o{6U5-3%u!di3vIAiS4rvJ^^muxwzYf*lX z2Q1DkqVPumkDnG<$Es(IwGedEJ~6=Uz#= zpj+koB4|^2W4Q=pP0dXVyF&glDRMloR_8IoRliz49X`ki1K}2 z1z9y0M`*_MRd#vhEEKIj+sStb-Uugr9i^h0C+h7>%YQ|?>RU*SdUx5@yoo(^ijDN) zSbfYtZ*el^Cn!uCUk8;hE%#);Hsm#*l~=%rWckD%u0GVR8auSUoh-jMg=o}o5e9QXQoG_rQ!jcZM6!G4p8EJS z1apE4&3{^YdGAqtJXq@7Zk0Q9`$BwAKRR_>=4PD z7Bu^Hx|oFdp69#K-lP_B!9Ghb!B|!G?BR)Olbs%$ct347sqA`Y{sf$sjT!hno5+w; zf||zNwc1vQmwWQ4Q3wr(9%Yg(I4Kd849nx`qJKZHm1OD=UVtOu(KhmCM%^M z=>rn)v)#PQ{vxjvQ--p;i!)EtQXll!{`N-PbFIziQ`d9W!V(u?UF%#7w!dmNboT*V z9}9e5BVWEWqwCsg8Q$`oOKup|k#*V+3K`Dp9+AR8`0BPaP01)NE@~ zi+{?Esdv$DB4!@CITaSnbGTHKxVI=v!iKS@nW>`GBb&R?2;{E43?FTm)JtB;JxRwpu85a;NmgOj z%07POT*3^Q$?f)fAD;em&jJl6!s~0gGJmMKckJ}Mw2WAM-lwF^r`<|F+Kh%8ux7_4 z{G(lOi_ERm@QAflHZvIej13aI9<3-vMX9L>yvH~(o1*c`zRAV2k3U1v^6qb;3n8vf zJ_s=O5O zC7MwSrB90C$_IX40}CZFuuH(np-_z zJRZ+$*A01EJVc}M!+WaVG@Z6hs(fm5umWjbIW>5KR+B4YTfNY z;M^P**Zj!(igQUgU#eKrU*_K#SHeiA|nyux%tMQNpxi7XTe6jtkJfocvStO z`qGjEAkx)EO&s}QW+M9%UqdLQiGwKJO|=)nO7yn28sKVFkLd;RODOi~cpF|H;#`Z5 z%JM_;dU1+QJYV?Uwts})X3+LT1<$js&P$2b%FRoqb5W}(%fPmTzjHPvo{YB8mPNHK zVtB8e2d~&7?&=^tqi3I+cKx^-m)X!cFc{$8n7B0s!D34yv*6h?tW#9vhq);de+luWxV&G?1x&fasM|Q$!JDwVT&+@#7>G9pG+Ox3 z$>u0eUM|nEHh-w^c?gEPxLX(817lRq&+7*fCCcse!oK#apuzv*>X2dY%vHG0O@%-T%b--lL z0uIFYk~)VCr?TSoj6I^O7%oewJdjkiP>PCvjGus*d<^)7BemR~8d;yH3^z$;udSAz z<8CCZn-vZ>LJ! 
zPNn2C6`^shRRPVuTAN;nIJ;n6W<*>+*J3bTYJV7$1#~YT)nq(zv6%Q~^<8FNhFa*c z3OsK6)Nr%_QdC^yD=4P(RCR=8mm3frHRI!5A1-WWKq+|Mo=x0YZjmaVqJ6kC1So7z zqmj6GtDMYfn3-uMQ#VLzF8It=JAdpb$mX#51T*Hz3V!QeVyix$tH?U z9DmN-C&$0Z)Fq7{6!8|_An_}MhMj*(3fq6*Vn%B1`9eYVvJ-(9@afQn&c~3Edg7&v zyW_O=3PO!PFPe8-FEZb`TD=ScGNj60R^`x(ye$7Q+>INc7LeQKG!4*%& z`2pfxPnDVUasC;m-qLvwgf3Qk^R?QJ&wqsc?8ga6Vzds5{CrgA`grOJ*6Vdq>UaN| z4FQ^n6HkhvjFtt*-Gq4+bz)2vP#0l)1{W&|mRB@yGxBkBdfZRN{loRc`x%?3UT*Sv zCV`h*=XgYhhEHK>FclWR4DC5<7R}wp`r_X}W_#^TTqxehUGiP*UI^mc6AF!;_J4U~ z9vKONCr{1u%JgziPfVmcTq1AB0|kpoow+N0YhHL7qF_SV@VdIPm5NF7d0o^!Tyy!A z6Uxn4%`U!BczYg;_nzTY6+I^+NJkd7r!$df*!+H9JynB@X02~W$zRk> zc)xk@da=g~Dfy!>`vl2adSyJ=WOgBtmWSE#pxjLVg$e38sBiKPyA!<0G91d93{Opc zCAig}vtA=F4J+v0`{gbDvagnUy=Jeq)o&eM;8SQ(*8#i9IJwz4|EW#mU4K*Z*prhy zn<*)zg0r`h>;v(*J3Rla*$mmu#h_o8hrtu@|nGOP-XBi?N~UId$4PuGtL^x{O` zpU5lybpCvXr%ww~I}FOyvF_sR?q!skGICeFh)=G1@h@ln3@hEO%75r$ilZ~1Cq2Y( z;tP93b0D0JIH0%gqhn`yb$6bq=77u#GF*mpu!R$Pd|I}R5GyVC_UWT@x#5v{*Zg35 zbb|Y$)3%RGaw1k-mjXwh#!I_SYW}Gfy@$=j5v&?F7)PC#JyPBl#GD69neeJIIR98X zOVl*0b*%SAP>aGIj|4nG@4YW@rfs|9 z*40?8yykPBzJDm5$K82GNjGvhwal}QRGhiqlutGk!_$dVd@_PJhHkBV6&g_usx$dn zI0IbqpEGHXS6l=d)Zb%c!QWSFM8F!~_hJC0|MR+!4EPc_$4~P(R?OvE?=45t@QA$ zZ*f5@`i)`S(CG``P9_f@@C?Z$Lq<^VBbI?BnA1fOXz1iW6pOzAj_?OW< zbRpMh6R;%gblz*F&4dkU=~!&=r$#Dm(qCAdLs`OW_u1}r(r(DAG9r3?-%h%J)1^K< zZQ9w(_I_511YkxAVefj`)0l zi+}foS9@ckGe7vq4u%e5f>9=|Y!Ss6s%;>Uv;8?eLyK@qRs}Tngo$m*vwX^h^Bh=y zH{Ccufv^Id-N^=Tpy*6E%w9i0orf_E+4+h`rw`UECMqFC_C^_tuq-~$d$Wn^FaBaOI@_Y|(n@YAyk)N1tvK9%CJ+nKX$Wm(+ z<#vi+zAo$A!VLiD)e7jr>9%+vJ%3+iyuidHvs`5{myt#{>>WMv3_IZgmS|yH z23@QI*$es$UaGB;Rs9ajy zcVqMUUd$}M&vTKh532%ch7k2O1L*-olDlh9m@q{pMiG<+B{jt*eM%ASi*yte8uO0Q zk6ug#d8(z8S+6#Q)o;0Ejemz-%25sl{5UUIY0ij))T};bg|!?-zlW2y?zcxYG58bd z9^X77=iT^1WPLmCYad9EA$U^J`{qsvgwurg3&* zW5epCur=iUT;QCi*Rpq;iiPu2pC6OMiuP3NaoA?nl^hpbTG!wL-fVQAaS4~@Yh{`= zd6n|Qcz#$fm^z3`u77)aqLbsS#LFfx;-*_a&hP!=BqGDRJKuWNWy{2^n5@@_8H`Si zh0~xur)$N#=7X^=efEH{sI|8Uc$P}!MB8YXctvlcFKu>ydxHzHDlhcNG|yDSdXGuV zNPT|`ID7Y@?6l6^J3Ha~8pH*zL$TK6_R|Dv7p;{@KFKFP4u5hH&L_7WLkn)cHeTdt zehRepF?uSAcrmQrHMM)JnDS2G->VOnTxP3QsU%AFgnM03ok^SWkqr6~t8~47o>v#( zovpNmmv^xpojYHV=+{kscSGUR9oW{F5`O4tYuiWd;kZ>w=QWZD)HS!Pp>Fg(4jeQk zd%)VYgRH&n8h`C8qju>%I1Em>1Qar#*F~zLtyLuYEmbnp4R>}OScTRshCd0F$?wqzZr-Q- z(4_yiGkV@e{S}2P$Z+l2-mB==OVaibn<9{u?Y38*adxzG5j^du5mdm#y|%}+=U%|rTyL1)zOcJ3VMD=mCT~%;d4mme#s<3CaHWyi#)HP+f#jePDFQzy#8th z7-$vxk$*6s(`Avp<-TS;I~82FhM>GWYtc(&H|I37--+Gh$NGMo2p2{EtMFTw*yn8GmBpBju^!zt^^oo zA{frsxBcm^n3L=gTl@5UU|r?Z2>6J3 zy{Jj*8~RQ&L~BPk0odqdL~af`y|;y+)PLSLqrV#OJ$-*3HOcwvp!P0#J}<+qwA3*v z5qjozzzU_pK4hTkDhPBuj2^4lKnS+@u?V)uu_FFVVn1phf_)br`Zlh6E-U?0-}l^% zg?FXb*KJ2PvE?;RH)l~rGkCgbUNn^VX-+{;#W%)5*U;^M>Bsc&u{As2^HA6873u0N&s_+Tu>D)Sk)%Eu_!bG`ur5 zOE!g_-+ofZyM;E9#eF)MiyoJ&Onmh)wy*TUPp#sV6Gs<5CD<|}1B1_*B!9oE)+fp9 z{5qR>tb1%S71Rr+SBOu_N5f9ikiBDi*4)c}ZMh2p&YG*qmMlyWc%GIo-)I&Psih6% zo=dS0{l@!`)ZZx!gY8y3(rU1f2D}Rlru^Y2k;s)XTh);mm$Aw7bCp3oa*d4z!iAS2 z7c3bk&1K4Rk7WS-*2k46N`GysDX|zYQJ%orsO>d;M)TS%V^3kQvRZKzg3S@BvL2-| zz-Dh==3U>7J+IqcXXukgZ`r91rbo~dmPJZJ^gJGSBHUQ^3?yIcTmtdkb&?z+`Q|2Mrs;Q*|8z=_Vzyo6TG2d~1?HkNabOkhmVbGpDr=4NZ1!$~ z`g{dp>dw_;qw~;$!>fCHPb$40+K))klfj*z7N74;w?cK>gN*_r%EP> z!M(7lcuHD}OdcC5Xu2W4bl(oN8GI;~h15PIktj-4t&KDS@@4r>_b@AIiRZ zoL&ADf*~CBUdx~Om0=&wEh|3v2^V)bZW=hA2-o`^KDG(P^C{JX8*It=L5O?&BGb4b zE*AzaRBnc8O@AoeE^L?l9i|QOINt(elXjx4yK(EHcNZy-#IWPxe6$N5i=a9)xl~ap zC!Zvn7Anf(7Jv9q;v6h3_SFL<3|oNppS@Q8tJTWu^|#Xs0Q?~UnBVpw3oKni|LAQ> z`IZa-mwkU3iAU!Y?%OE^0C0}K?L_P*A5h7qh_w&^_J8A`WPpwbIHZC6KAmR(xci|* zn4I|ADdnqGK)v|>w;EEZ>v?mg-ro@{0B}Z;h<+7|R)bz80GxNUTLH&Q4{^YMJE0ui 
zQog-lSbI!Eo1$FaOiKWa;72w}MW%_|SVnjY0B3){zlh&XDBsQ}007+mejmQ0fas!x zV1#4*$AA5Reab{;&xKv!-!lOG>)Sp?mX6LR@Z0GG04OlW_torZ_HuNZ0UMD0qxA}0 z0MJ){=bdcl9ewM1wCSbnW5v9Iln0=M3x)x31L_7q2KWWYFGQ=blv%x=NrQ_vp+cfu z=^R}2-f$7A?{XkWhlqk&gZG~N26$=31EE5=!ha%M0WyP)I156$7uYBjx9Av`3Q_o79l=>C zpVAhoY~x^yGeQxJiU??VIn1_zCx!ve0qO;C89)eX06fojD`RHu`CykP;AVlo%V)Sn zjek&i9$cecZz$GSGg02;0oHnbjKgTMW8P<@`Hs)yN?8)BC^Q-mS8BcHw`hQ6 zdD}>WWEp^Jv=!jpLFz&gjz~sj`9AE8SP{b`7%W;1AOdjGRe< z1sjDzDWn~{_`>Q3-qzlvRptdpow+L1BI+bYLQDyj?zp~x4%|LU)aX0T{kVKVg2pp>B5$dl}pk!0hEY||BD%cog; ziDKivRF$nzM~46kOzZRlxX1b>(7y9tykk62Mp?z9%$D7(;@u3AKzQ-iwSUn+SBFPJ zu9tkNe{K%H2v9HikvlqAi5#XZ90O$fh<9;bbp^QcB<#^)v*E|*^-Wl#K(LVrmz=c0 zDzYqX3kz*44Y&Z>Pc^%=yk&HSm2w52ZWQ0(z>)E}2wWC}1$KMHp*Sz{(I#tB9R4gL zeZvrP!DkrsuiwB5!+1a=7JnZ32Tq4H$ZzWYn|JC6ulMDtHp{bgMp~^$`ml$3r*EJ} zEP;Ud_19AV+Cy0QL0R}SsBj5uzyN_{aHVE(*m`4anY(}6QOQ8PV+5!?$Q<^BEnqdz z<_?!?N`H2;^PfBhx9kLda~xbE5iXf1*hfyxLQoASp)_Gt?J zY1=CEz1>w~cDl;8^URa(X*MIlQ^Z0A z&zpoKo?A!(dw(sw)%7lLD9&N#!yXQX^DHc-3PAd>hxPUUg_mSU_67Sp-|%-${?j%; z_OL;T3T!G3w&}#&9T`XH`P?6E8yCui_Z4GDDJX=Tf5?Kc^SCckjO%)dDjBxbWe>LG zvG3*!$6*YRTQGk+y}ge4tDBeb-}&>>aX+rqC1HTKeaImflcxHF8`^4 ztZ28R&e^`nmJV1RjY}Ahxt}S&0b!O4H&E#bYh4w9GGBJH)@m-Py|-+J}@fK=H2tlS3+M^u;>u$y~5%nT*gP->gaD=Vf)Csi*!Fa^TA2izII%*Mp^h{JdIp> zgR${Gcz8X;`UE&APqJ;|Kr04p82VJKi9HfA3kErr9&UxEL^GN?&VH{5*M1IkOFae zJ9_WBu>pMWi1w~S{liOk+qDFO4Dc7_3XGk$rGH>Gm(i?jQE?catrgik{1=`v3&|ZQ zN+6V=Io40M7sMXxw|Rf^+1Y5aBex%866M_^jXgRm-`xLi93zT;*QpsVC*AOZ!N4$t zd||_KG{yF%Ticr3jWJr`Dn-p0+Iyu}^lMIRS}{F_kYRaqDUgUQb+!8Wz<=ihXKp|$ zzJE9q9r$mNfAOEASCI#6xi_HxllPCYa&cv`vi$INX7Nt6 zm`I1`?dM)t6#uWf#{w9E2arF&$B()v9e?(RC1L^i#!qEzymZuQP=^eSW|k|b-=F-X zPwjTpB?ZGqkYw!p@Hcij>Qc@h6s3_TU8!8%X2Y#*Id%;;Y&TZ1SJBui%66%T!XdBtYf~}TZ2qnBUXAt~nItM?dw&U8 zhmLf%E&N^Sl(uYSyY-IxBHghH>@zz9a60OX@`KAdxKXUU$&&JajYlfwW6Z-(OnhU7 zH2&6}Y!mkA%)40#m&g`i79N=Vz+Irdj}n_hi6tUou9pOFKL)}jgH19X_Xwk=#%}lH zlG`Ej+dfe#Q(sEnbUlF`ypof)RDY_yPOU_cUMr9|W6M>#s%?wyc&(+fo^9+@rm}N~ z0znVOXW@Ykf`oW*uD)XL3vUFTFO)gr7yVdg3On)KvZ$|vwz?(E4xZt+jA}b0d+-be zefdQH*A9F1iHO z*kq)!MT1t#jeOux8*%k>=}lakEW*#Y=q2fFnc5Y{gu3Wj`1|Jt0*QKJf zf)J+y&i3w-y;QqcMOZ`HG4*JTmcWCJULh*tX!8VsmQv=I6C~}Az?YlI4(Ar&+=PCv310)fPX-csx6;qv<(c3Eo(lntD=|*oZyQlVUM7u>!9;%c3rOW zj(iaq$n5n7RrrAuCF}%P5~&^F+9SU&VENX`&}e>moPfTu(JRY0haLW9f-hG8=C;G8 zCiuq47t@@D<%j3}#7jg1w~gzu49t=aqI&c+C_8FhR)Uw!+KPIsK7S5^Zk;$tcy%>! 
zs74=Us_xlFyCqO;(a(YeMX}0exm30vevmWEUL3gs7FUiZnKfB)p=8K0s(N_@e%X>D znWVU?-g`f%*d67a#T#`kCi9zJFpp_?g2WKhkTsM3S=qa@6144v)DV zzbm7!Z+*F~i;fxq2Y)*{8E#UU^Zb^b@hSvLyI&R0MqZqV(nv{h>h@PnBa=6>XHv2= zTO*fN+s7h7a>wp^Bya>P;?#|GGsEu6S1%Zzy*~y>T_TV4jp|%|LJzOaYD`6Fz+geT zw3cC7F~Xr?0Di!zMa#jH2G+q7HmW{&02J zzrJu>|MK@O`_^XxDZR8Sv5NGT1)0h@<|5P?%~h>v@fxT|zv%ac8hc?=z00nWbtbt0 zNu>!vi6q{RZVDU0-7PDQ%OX&62sQ9$&5(>pbvg^%_`0s0PunOm1JI36JI|*t`C26 z<>3=Y9{8!>y|Pi#7msa!KKHE;zy9*&s6X)mM6#>p)&`3y+8|RlG-*DGoVHTAYK{eV z;Zwd$6uX_&eOZ+ntJ8ud|5j zG;i~k>_n@x&ZiU?B&+gm33{clW{eD@qNp1u72M61V`!FSlH!g=W7zaF5Qvye%~LtXB@)T>Z7d z_NV3_K7XQ>@;Be|+fR);e2F{i6O4j?VOi-%`^lie>X>Iol?E9$t0{()0MfQGR)MjE zB5V109ax*-HU7Bi!oy~9{mD5qBl_k&BH}jgS~m` zNLK0!sZQpK{Hg>zBNnXhuh92Cz;Le0X*`9!mqsuC3m0eg5$itCSBcbj<#cm$7JDwX=KAAhy_r*<5^_FHql`G}3n?i2PGR^EMHMwul# z9);@iF~iZ-)`6iwc8dXS%WM=z70TqD!SW;lM8m>9~m zTeYcbn+f;e3weF{78ohhAr+ad@7gi14@O~UnT;3qap-Vn3FMbU_1rT5%nL=*p?}AH z*Xi%NA9(+0(uKrd{ozl&BoEy6&7VJd$e`5O8`KAmiP)IDtV->R+DffL#XI6IggQJ$ zYOS5RHiF)c77O@BYp{AmXHSm0IcTgwo|HK9S0rQ z{udkk)`_1r{o=mfBgKr zzQ2BOV5}ov+9OYXb-(`V+F=6|#kx1bWPiz*^I6wvb2p^ofwOjQp}3KubE)6yDLloV#K1{c_* z>IRQ5$x>W$t{gK3wt>h~y@Gh2UHL*_^~4p4&&{=gvLJ)y$H{VV>T`Z}nhi{D$#Qm$ z6dpthSoDWH2sYXvaB9lrDxOr8)4kulV_XeuFR}n473{OY;eRZxw3TLTdLqG+G1oS? z92D@5Jct3E3y@~)oj$vG{NzAOYoK)070>)CDikkz>2|67S!Y0(>=VJ@SZB0QU!53M z-od;0AKs7+UtiwjfAc5lkKU%QUmWnopMQM%zxnGoZ-mxS7yk0KOa#Qz0faQ(raXCl z?jDFfMkU*VjeqBo7keVR8$y|#P*=JvCzlbVeje|cq=8Ib?PX56YJ#b20*k)Hmv%n!L<~LnW z6)1~@pr?MtnJ@ITxY7^owo+Dfvw)DBh@zxQ5RkL-6Mhk7$44@!m<|U5rvimsDkZ;W zj$D<9D}Ox+0-Xhjhp>Te=&>-v5uvs`DfE=cRwx(X#S2)z3@h4Hwn#@XzDt{JO747g zrDTwg<5Jo3BKqP87u$GlKKL3xA18RouRyWXg+J=8aO6zG`GduO^XGxF{^rIbcTgY9%#G#9@ zB3s7MORs0jgbOXRGO}!{k)H7r1zFM#(a4vh$=YoD(z)QwA9eZiWDUDg zT!*yamp`#^`R*h9(E<*f^Kaed1om&N4u|jl;s5*mO1-_v zI*e5wQZ)y&iChz2U?SIq+o*-l3j(i?n%f>Ho_wXIEI|T7f zr6^T%$d&OWaOVat<)#8U1Ig&wNEaQq4HZD+mqvGa=P<~;b1;h7YGnCUX9U|$LCF*B zGR`(1ae$0S(9aL4l5f@;MH;(!{C}$VDxzD-!9$Vvc@4*}g1y3BH zL;u*z_gDXLe)0KVt?Ea&`I95Rv3}^GmFFGy{>AV=dj7xkEMxty)t3h!dw((BsMC^L zIr13}UA+cw181jEDSlVD&;y20#}V>o%cvL6As$`DkZICCPDUg=q@lViIH*Ls&>g#F zfu|P^DKC%i67jW;sVDsqo%Z|t^k^^;Hu$4e7%Sc*t;5x9gYme61R)4iweD}xs*z<+} zau|t>0Zcrw(6R3N{kuPojyU@2dOtewmskGfUw^P+2)7^I;y0$wzx$;9PhR_K$>9&? 
zex-6%kQ$3lpv(*}7;Vv^Aw)bU0M;R5g)M7?C zq3?{vxsX(L6tjq~?V5h+O-V`IQpy!zq*^ajJf2C(>*|P}g>?~x1?d7_K1&vC&LXoD zVFPK)HnAKlbxj=9r;$S$)0byuld zOM&=HSYHBi&VMJLD51H}kx-3AX?qWjTTF=;qxPQ3DoDokK=`9kNt_e zxYoW)MaTXG|G#bchYx>sRiXNQKHwWqzgqgwe$5|kzy$yJC29OVFQJvP3x9GEY3$#> z`vymBuU}qtV8LH}{pFPBAK(7P+Q(;%dC|H5w6(}qn1An8lPE$*tmjhQg-~{4TCqU{ zoaLc!S^9FaTIyi$tWYv4w2rJ!*a+Wq?qMqxa1^|FR4a6RN4K37)N<+Zd`)+<)AF=4 zSLzG&`i7G>kOQ96O>Q5N#mOe<+g@7#N3GaA+V#*CZM3O#pi{OB%fYsk`LY!++2Gp5 zjkDW|MSs~K;PXvrpr;WKX`8Ru_2(3Ov!L44@PF8Qx29KhW?k&R~8uQ zPX=0$@s&rP{NUEmJj=Q@DjrFBXAmzF_Km>&p?{p)@gnW@`_FoQE9K-TZ$If08?%c# z&|kTvA75R-S|5My$>$p@VX<~KpKu2J6n|%9$*@3E+-tHyA{8t!JfW*A1p2)94apQ# zzIz2FIqL+T3hZtn<~>y^9Q&jC5}~iF=O8_7INPN+Qad+ap=uln7Qr?1H!-atqIef4 zK!4XTK0RL~CmB5f7rbEMpfpiE_dQ=%TP=f6j^~&(5=mSvY3PXSqvER52k`)loSs{6 zW?#dc;6Kk@Y6^8Q`N|MG1X?lc)@ zdkiHJ`I9=VA8Aa-;sU)t6t0&!ZQ^Z5aes7PZrH8WzQ$-!GZXDZ1LBQa$5t`=l}8iL zcP|e^oLgi;w`8D(io3npkOy3ZmxBIY7HL~r2e)17`Ym|hZ(dpj1zhMZjGZSf&6n$u zUiPw76B5rVTV#1{kl;>sc+*R8OQ6UsNc6E!mA$0DDLB7)mSkSUX7BmjEV6I@3pU&tL<%qS=Xsxe^MD4ngZ#N=p4dWb>ML6BUw^^Pk&2fK z?j5J_4;bRe z%%$N~sXRE2n|+W?^W1?C)wDcxl(EK89S?F(dBsU+h8vLZ@pHnCM0!3$ye{v>mXnb7 zHcDRr3XvW`PAlq9cqA$11bpgSLc?0@;-hv*}Z}J zLd-w(vZ~x=FxX`ps4tQ7z8t}li2VKTsZZ6-kYD)?#ONT6{$MVPGIX<*aTV+O!mD1L z#iIM9d%3&Gw3n5RL{5Ly&v-leu>r?j;EZ-CSJ$yJSXLH z>*eiYkBGd-%{?ap(41O5-=aqdy*MB|udJ^VBek;NU(9LLQ} z#Pym0c*0HQKlVQQ#twhAoBoPvU4QJ`FD%)gan1>n3i`yozwty>vdD8Za6O3Cinp7h z_G#AX`uykuRVXM1&bLo1P`(kbT7`3!nc_h&hT|cBC6U>bu zV#plq)TYvBx3+0}+v|fAAzLr!(q2Si*mSz<(tFuwmdIio2o%!$O8I{gTJ?j9?|cz) zVlIntuR)#gu-Sx%B_A_DqdE1}kA0sL*PL4)lgZmOXRz`8<%4o@OizwU#4GW2@U@uk zG~`e(sPns3*Hp)GqRa)<(CoUAx1nqSiCuN&fD~XH)0lUTW~0vJAuA*H$y6kACa6a^ z)(o6j0OkQdvZpri4&;9zPVwqLH~@)#nEv1Nw)hp3%M|c@o?%3O=`KVYxhyl64B{-i zH%C+?ydUIkkn^>kTlt}&MykRE0$y^1+Ci1y_(EVHlt;Q;d?LTU#&b>5%39WXN4>)nd8sIp=CetnvkGvmizGXH<#k6ix3!7yHb>CRU^ zCV94oZ(a&Ldu&Pl&ZlPNF8w%kOji{7!ODF}a!K!?z9rClvfTBgq79s1Y;*EisS|0C zZ&5&nokO&zdt=NZWQqM<#Wb@qe_O1M>ojjn>ePFMef%~D%*|rt^!kh8g_Q|kCZu9c zC;6TD`Nr_BTK|8eYrk@W-+cj7USG>eXn?;w@U@QI_)41XO|EhY2EoEvkZNv2jVAU; zDN(<%jyB%~g!Y{3&^q#a&vQ&m?^Vxr^Rqin`MxLjA+X6=9`g@hm`?FW?{E9{ulbqJ zWYd>Yp<_)Xk{nudB5re|SCNFaNXu@HjS2_l(kyYhbL4-I#aWgdQf9fa>ci0S4ZW{N zvrsm!+aFwrCw#Sf=B|(ezV}cA78s6=;lGV9jxDRKkI7H?dGE(wv;W_<#E0qJG~ayJ zx@^t=YYiu(B6y;J*ba1o{2317$1lB4w^AA{Cx=)G^COYzRn95FrxIebtZ%ZiN7~yl zFzJxt)>MB9)Rm1^M0N60fRpE*r^hKEEe`NMwttTTaR&$Zxn_@1+dG2H#H zJW)LVuF)sV$uFBbt<$BCx&-ePQRq}MIGPWgExTKcop!G_$;$()zQ#(1NIq(Pt;_Dr zlSuCtQhX#o@d1I_1dcsJ{>j}ZdrtGZ_e-X-pV)u-kN^2Ame&8+{hrx@-@i@tXpFzP ziZN5Be(&SpM@A91vUDdG*p?m@(Y6%*4MOpMPxA)Ml&txBl_x)x0+y zUPnXzAg#KTd&%%bt-w8blai>`#SHDdCu-f!a^E^wPIeBxQI`tej@;NJ7;d)H)v;ne zr4NtIVxrM0D-Tp)Jgsi9Cw1;L_&WKdq5qN|B$dRaaZa?8sqD4+`um%=Et^u z@xe5b-m7pABs?q}Bk4(U$g>QIb6@F?x>}J@!pHF})|^qav3W>br*v)^>K7QB{9)`l zo7pRF!{!eKN&!>~59U8tomue%$*EFP(x73$@H4@Il% zeRAa<7Pf{q27tflNN%Z#j*xEM6xK z{_-cU8~osFkl_kC2FjKewurWNc;}x!@I-Oc4hzrV(3>tE2*tEZlscDDL6Cn}W-`W3 zv`4&*X=dOeGn65?8)Ey8Kix*5ohdfmOqq8??gfu+*Hf@S*pAHgo;$)xMFPMu;einO z@mK#~eg5iS|H7{R&i_wqH$g-!?&$f;pZp0D+U+ZbIjSXTTRP4vvKe11NioMBbM7X9@^5lPmYSq*Tx`7BJCoS z3wKd?UbS5K@!vRZMdb)uf#JS??`I;D?;n5dz;_HP%ga8N_hYX;GeDd#!N-h=*OIOx zE7Yiax{ggX=k3D)C|L!%gkuQ`-cLW|;=YKg-JC`&y1n=?6>Zw#dgXtXRLhv3rZIax zwEG`>9sR-QO>>$2Z?f>y$5N3kEs(|&6&$5dRV1qNl}CWfivQ~mCyky~dmKKfd@wQO&1$#Q%v;Pj&!~(ax+h4VAY>V&}NubOU zWxSJyJOqADQ_*~AAwNW=5UBH-NVuOVLA^6QmETJ}lAMnt=04W&$=$o%L!a~>mfUVD z8ok1? 
zTv44yhe)fHVI8`2^J%h}M0uyg2U{#@s+UiQyd8B4b3BZzlh*eE>K#Mv)j%$?wMKWk z<_HHV?`HdwkF@e1KkkH=`TErpmf%NkhJP8~Y0b0G)xXSLF$OSAGMHWz<|i-bGYte0 zebkrT=Uw#SOHxTGQG@2A?dljB5P#KW9gFfvK#9X|7;vDQ)$qhMRbj1sFxAR zQ~M<778QSvL2il&R9SZX#=y-{%YX29V~tK!iP?TGo#6r8IG5npe5FqFEQ_Q3V0Qnk z&A~2Q{aBvp>7EqlF_%?lXU~zWWmAkhJgyLt+*UJxuMpf69!0 z=V5;DYl;`Qe7hnz*3|+s+ifq<`F7xvSn`Oa6sxm_j^a8YJLv_-Tj#JMVq5=rE-izJueYC#|JK!ebwiZ&bGQc+PPlhXDn3TS>w-99jCRTrW6 ze)AWbY(LK#0KC8Obp68*)4h24ldn_MG7F{mOC?I}qlgb|S>5|%AUYmB%8qXYmC1hr z?@#fMt;`>ny{(?;6#K9V)RQ=;M<-7Mqg~M+a;3V6-F|`A>h595?4Vf5$@@F8lUo1=(mi`CVat30gA0M1*_K z;vMiSp2X&xZ@l0yKFOch<<~y<6VBF#B08J~^nj7?_PsS)RlgPac5nCjUa98}k)8{= zpfl79mg2;FMAfN(UzjgtMBV)uFVxR|fpKbEa5K%3s>U@@)v^I7R9ni0qcne*OivST zG{pnM4Nm|OGGUPZ5h0nwm&sIuk@|wi0x%h=?3GKBnsWt6nohTco|{)W4Lrs3jckW z3%o(IQ2hw4DgGxcNl~P5Z|r}4l?1wI=+4eHUuEv2i-0X+u7YcG+iTC%aQwh>`u-#V zQQS_qy*TuUiTx4^Nj&-nPUfnw+4h*-73aUo1%_-1gE;QZrOe~H$FOH|FlNgij5V`G zKRQ0`pPlk3lK0pBz7sZ%(R?u;=&$>Je*AK-H;FW0uR2=h*t(7{tgU~K|1-PhBQmM)ZPv8}?=1pFW#9Xj6x$pG8LkCd3{il9eV_iz0?E(+#ik-fUY0O*m zneH{)Cl$9(f{w)E>5h`)!NS|S4*V6 zv2#}z8R)sV1_{vEV;O(44h~dO0aNizK>(ixx2zqAF>(PF&=An6<^Zm-&(14o*!cnM zp0az3-IH8%K!iW#j2`j8^{|Bf7OY&80mbM6a0Pw~3Z*`XIp%w$z%B+QH}^nl%;(_1 zY71^|v0Xj5V&@aIA8rO_0EmhthZC|gkn=su8;CrB0XqVuXD5IBbRy5T{_Irnb29`a zh&~IM?P8Fr=KD$XO3feF&|_C z*vC_SmkdIX3;-2*d+y0j1W~EvfK0^;1peSudvOb7_dFG9Ba0yaIoIi1Et_H( z6F+(|9a5o8CGqr24=P}l9l^8%aSLIOPN1~VVn~EOsxTX;DbfZP5P8`52!X^yOmVKF zs)CcLavaAA9G7!Dj$6LDBe(l-i}!3Uh{7=#=D`m?|M15)VE1_kCTqm3xpbJrJ{U43+t*VocO$2c+vT z%9C=@L)llVYZgWdqJdG}^`E^bq6T zmr$&x2tudJJF%c4mU~-j64f9`^erjogCa4Y^`*S%=udqv?#<~r=%?pQL_^H_`L4$< z?s0#E;uv*#htX6YzCfxPu$bx)lU!b)Bn;}b1GVX35aSKAvt$ah@vtKnmP_HJ-E*#bnPA_6emHGsIuEjtS_7T*@54w#LqjRO&rp^ zgQMt6WsgP+caT@K=n9q95yG>Bxcli-Q6GO%-O*!7OWb43XcH+!uL0;>bV7Uj-+`1n zq1_|5EmQ+kI$~W_?>)G$!34kQfL{Ed$bIxI{HBT3d0m7q?x|$O9gv4S$~~3p-(zQ_ zpcn>dbbL)`4bTO`MhYtXP36@}WfqRtnB0^J+ZDF~6dR!E<)X(f+waZVxf0_|PC$QF z^Qm3~K`b?w4x}n3?oT`0uFGp1h$YdoEjpIdAYb{yi6hzIn3gkj6Py3C&)GCOxJ#qQktRhFtjU3bX(DW_dW!KL#c?i|&nY+QeRy(jK7 z=>UmLN9cp>p$u^60M>lH2PbCoAZh`MJ4%2qFzY{DwryUPR?#82t4t?C^5@=R@{7dS z_^G1|zP|^TUgRx>wclCb9xnebJE{Tpq<4TyCWmZnsEm?H4i|V}YfzPUd+_ZZ#P!Jb zZ~Vw_U;r!Eq(CZrQd(FgVK0A{OsDvY=@2bZ{v7g{VVD4!Kx++u`;}Id)t?d{<5ypf zU8+uF60`ma^l{NkYWF(HYg3RX4g?Q zXYJGQIKViSH-kk-3PpdE$fL$+KL+4l9AE!W z>3IXg{HNV>_MTt#PWWH6V*^Ov{6jmxZcqmAc0RNxet8xU6@3V2W%drP^=%s1CrEE@YzR$>m2ncuZ6!k@@8>N}6;;2vW z8~^B9lN}fnUBHdRVnV+{z(_ouBR8|+!9((^QJz{&3sll zqgAMUl6R1ZY##j^8Ozz+{L7C)#L5$F_sF-3g!L~tV(fps*nK|DX(`~p>H9(*Sv~Zk z@=0N%uIk_>IuF2F3OfDaBgVK#wf(pDHfHvO@v#A!*>-SWJ#vGhoBRsx?_`E(7xMLU zWLmL#GL{`0Vv?$(l>7@nQvK}2zUu(O4rLWBr}hDx*?mhe$mQW3X+_3=N`LOS`D|4K^?12($Qvi3~+GWE@WuRToG#&vHp8&CXi zAAa@k@@w4V6ik&@f8<2&F>gN95w zAYhl>tMdR!Q-9^mC)wz1{V>ggG4lz9Z~3c7wfX8qHcujL(iaygU%!?4*(8t3*g&t-Nqk(hr!p^^jhqlG{G30((mMIXpw^5~7@2!LdE z?PGQ=eXred1p4!P$*zBX|I#b$I{4CmufO!)>o5KHdK^Df_-yQ+os@SSiz5(|wLdWo zlVz|(Mi-XjhsArobRDAu@F%*=Fj(AMn&={z(E<9>30>C{^h0+x*jj}!9}eZywLgDd z7lxstpWn;r`ultJrO(-Q{iUC-n=k!zefiQ)*Vl;;K!ovez^q*a9M-3D&G=yRGo!mt z?V>+)-}a*~UH@&rKhZtE?UyFH^xOU)I_7u6Rv-V^jg0wcV*lvKBs&8e`#pa3l=-QO z`qfeP%m(3!p83PxZ+f2je1GVFbeeyX-}J|l)z3fvVmp@8JKR@K8BfY2*E@ZK5Tif2 zU;2)yjCMWM|C_e!I!b=io*D1nZ`$2d?_YIB5m? 
zZOi!hLpzQm?!W8K4&t<9=Lzxwi_Ne(E1~79$8y-i=+Kk#$9FU+i&crL-;@JT*$z`c zzvjHmAb@y%lqmuIFE~?^h5z$l4cU~pAOtfyeTswrxX;J#8ej7w8F4z(A@|K;9yaGjq14UPmr$ec8hKG!MxG_%59D4So zP2tJ#GA4`TiY&^52*^HUJ-UIML7GDjk_qv2??alSiv4H7Xdw0Xp5b-#|B?3+u;8!n zW$|`oF#4FH7o(#rzaxJWNB}kLsPPj!k6k7ZuzM+os7Eq;PfXDR6beU*Mz)VF?NJ9Z z1t&*ukOepbk|(}$#*fe9za!(392Ls=vRS<; zMa|StJt{z&`Za%M`4cBcBP&1jhzEt$126U-oFjkgFIGQ>zsT!X-_jmkCb?ksv3_Lj zlWWMByvRL!uKLl(&u_kz>AwE*`<1H$eEITwq)i`d0JLX$ljNr#|K+{p-|L;^M2Z>x z2#U$dCwdjW@;J#cv`in`f8tMNqlVsZ@^n-r>Y-$>KR?e#{J7@$M{Qo*8dq8J=(Kz^BA9Bee0S28w?MpO!jS@ z_@C-$a{i;Q@E1A3Tkx2^lm1`QU7TS4%BTMQAIqanbD+xRECMj?|I-0FrivCz@rf*N zh*+MKu}FWJ2!#xHDFYMPmUY#awMqX}h8t}F#|$OVGX2tXc(C-7!Nvn`fAmIW=hX2a z`e5a-WO`&%ScBNX9$21?ZJ|Sm*#U>Lc4aKNuCe`^ZjVeLJUgFu&ysCEMJ|)e08qf> zA?TTW*AV6?W%!SX5)4KR&z^`(IT0l?&tDEDQy-APr^wLP? z2ga$~_dlj+{w}xRclrEJdH%cHrhoUPnCIhldXBf#|MI{7U;jJ%O3eRuK4ho=^XKYP zHsgQj2k#X9AJXGcKVOgkHkEYGu{r(6l~vJvf9X$UUOfLp;Q9a7J+lA!*ALIIuhA@i zeR_Z9$nWIoh=y3@?>$andtnp1vAH_n4CflvK=nFmhi7 zgh47DQ1Y@hb2m#lf&yo`k8M*li)*Y8=j~Z~mackGuG<&7Cqt8tLv1eY$Zyo~c7rQf z@9<&Ky82!?y5s$FGqpi{oqXB9AJTfZJVk%Y?7f2cd=7*caPPgpSS&Z@LEL7oQ+K`1 zi}7ZCFWp9Nr5g-p4L1#(y>z!PytG(9*GuPkzBpa9Ts=7_G(>K$T+fG@Myy&W@R}Zu zs&{>;;o@=-H>YO3SQfKTwayXu)+*JJo=9!VN;E3fi+B00)eZr2^!xMuacr$I(T&3k!(nF z;y+80?9aK^?=1J2q0j96nsM^5OcH-kyDh(+Kl;yd|K=XZUhEIF8;$2W<+IPL8N&24 zOBap45@xkNdmen{(cF$I;SnC~Ww+jJ$L{k?RmXd7yewEPu0gtSmUIjv>6wBb!FxH@ z!Q#Er_yFCk5c3fD)N%pLc34n+0ca^NUtZzdfLA#beN>KByRvrk%c0`!#tMIIw=RYj z5Sf(&cZe;B(dGC_Fz9Hxqbz#$63*LvIr13Ip1J|)jBagfAKmWvZls2u7qdf!kiDLEr!V*5Kx-6Kw(xUsMPGhA7o|G8h$yaoeICA?$9yH$!*#xilAx2Cd^{fJ z1_-&iBpn0G}DuT_?GAV`4B4 zD}Sq^=b@uZM2}q^Il1Q18jYW8x$s48#^Vi|8IoqmDDTmO(@*rg)ykBy#x+<-!=7K37b(jJN-i6(KsK~=*|y!e5AYk zBFmatyL7qw&aGF%d3VX1vFI1tz|F8%-{Y7q= zxh(VZ#yuTJ6U}fdmG`MRmFT!6i>a}tKBRKAA{@HX}moK#)-Qw z+${-~peg%@{%W829y;`0Y`fU?XAZHBw*bSU&2%2NpVWOj{>Oha(X4fxj)OCQiRnUr zsJMTP^Dynab-JRp(RXHL6yxi)a~(H0C9W18qahbQ?n%_Fz=@@!&=pHbc6aOB`mv$JihHGx zyhW|VpG0i!Uax-xNsHrFlQ-12Px>{A=DNSBoy{1vf#PtNCw_+VOJDDMfeXv^Z2Xwk z{iUxfRe4^-&o&eId*mykOwek!PWHui)TnhvSXAspp3Slz^I&)~nr+ux!tpeH>zsXk z>q=Ok(ih)14l3oR`L<(4T>63Z2+ne{3xsV8Cv4=VdPRSy+7efG+|iT#`LsgtE(#+~ zE|A<&F+OLF4x1s@MN#He5x0F3DE!LaVlG^b4iVK|YxuM@b>f>Vr>O3?xJ9?nM~lZ@ zCG)vS*N2y+uX^_FAl$CbxJu$1r2JEkj)$zeT+Dtqi>{t;_n+-P96EICjDw*UCxvf< zxD5Cf=0|_uE)gxZKDtBCkZ`)~<{kKYyc10Yd5RG{1jW*@=Fj6XHEZd3$zExVL(?;4 zo4*>h%!i*#eei{)ZR{dr8~Z3MH>*Tk950;pn&0B;(U*Rxp3C*}Qr+ykwEc8>v&lER zLiF|dRh*X?8l~wyuhe&rbpM>TLXvl ziHTFVpWtcR5%nZ>Nc{OOJ#d0_+lGPksgHl+P=3bw<^w!`c$??b(ch(gP|r5a0_%6l zNx0ZuwD()7C#fn8`s?JbxZ`s-o9cx7Ew`?Z}r+h-Nd@xVx2v z&G;PRjCPi{-)-l1oD>5HPcQG&+TYTH%Js?Vm>(|TGjzCk8LvEjsF3?vY){+Pa~^-m zr9tItH5aa>CBJW(zd0tC$C=y;P3fWgxYv~vYjsL@iwk#@f>Y9L5S`tJNP_$E3_RVf zyYWnLU=e$HdHZF0QtzAUB<;16kl`1(_R7_C_=18GXG#7B^RjkulWVy!qnj6YK+cFo z&bkcevgioJq!$K6vN*q(W=!k;p%8!lcpH&&I_8Ps=X*12UJ6J?Gm7Qs+{P%l_fC3` zk$=cB&m-yCA|s`;JQnzvhu%ri?hdzl;Lh%1@2Xl?ygRpgV@TaFVl^maGA~FzUShjF z+zNSp=<(e8EPGW~LNzV~!D%Ivti@3=^`OU|EfmEy>+{cxr|nLy(GKmYRylveDW;pZ zsUG(i(_Dsk>5T0BOvCueap=ui!t8vn9z|FU&oF#G)gsAvYuL=a`(`$RDHpJG#(7qB ze0utn+9r=E(+#{Upy@PxiH4$T4)rI5ViMx!;*b~jcH4hkkx!b}V}5^)JnM3bisqjB^W~!694==^)nn_T(oDzz}1Cx@Njyc_QQ zJln6k@!rJMc-Sc0)owoyo42Ig-fgtAwQaE2ZQp)33|96s9^Ci|@G*bP=ONyI8o(>@ z4PI`PM$#cGIka6*d8qAMFda0Z9 z`gFL?t)mw|Yq7?tuvPE6ST!*p;G%la<=CMy+ohlFL6EFkHa{S^i6WFwkMlOhFt50= zakoRgD(cls5DnRpGmC$nsv9oanXjzQ_UZ6=`S5-&=I6x=-lytlzm~?e=#^9Ye%paR z=XT!fxqL5g;W5bt{IvS9p+}N(+vxRPegg5hR#AWB`fI^yk;{wk4bS%(>6c!9Rit9N@l!{{Knx_Ey{xkwe-V^db%BMM&) ztu#_5=f^W_7SCCW(Xm+G!u9j@@mwz!)w_22RDIsYF`NM`D|~;hl67*irD(rLpX=_lN(=6#@_8aeZ{gH=abxYH_)@LUtCz*?-c7M> 
z@6~H@C@!|Po^|PLwp@mf?Y>R&#j*01%|k!ErFxmQ&3piUrR-l>JYS}3{=Me=Ijn3$ zviG}tsmLo;2@BtNHrMEODhgjNF6M48t?Y)gmih(jHB+{5gEoos@&dPvwQSZzK6@_Cs4_% zFAVkA-IIUR|HudaIcVC_9)@ScZ9{(f^cObKw`rpVOZSyU_e-qoH;<$5r2VNeD=dY| zVSYz~rpPauf*cKJv;45Uif>N<$|dT_hpXxngCh!lG|x9J%06`6%wk zOI4ODXI&K^c8*^Dpm$vOv-Qf5Ms1cy_9x`4v*3U7FQY1qyVZY0_4V;~#JF#(^?UIg z``5;*)-%cl+ebMb?CNY@;@NRF^heyR+p}`pU8VckBF8~?b*Sp1qR+O8B)E8|M{u^^ zj^Mrh8D4&*+ohe0htjY6#(0Aj=&Axiwhs_|@ z+c$q!I8WPXgR+0U<8ydgp^LI{JiW?%O}WpmmT?Ya=mv4 zz1;ir3Mh9Gy;wadw#6@+`}r!;Bbv88O-4^v$9=jw4B`5*UF6+$RW9e-_0@a3KDNZ2 zJHMz${?emk>urxK#l9^TyjX=LS{%LiIl_Ob3z@p!uZo=|9S?EyUWxWQ7~fB8?%84R zfc3uHqQmgB%?rBeV}MV$GmWZd zzg-F2&radVa~~wOv)r^R>pt`4va6!qW>ammjn=NL!)T|ojgs(!Zy@EFDtCV0`?7z% zwFYh<&eg@|P#x>}!YxgIF0aqW;B;&19bdit1zvAETnEdimls@_*za(W`)<~+qR&El zJwhqa@0k$a$J4bLs>OZDJNr}$7nnc9JJjAo{k~TBhYROjwaX^AmQ{wLwK1NqWas*) zDp{T7T}(|c4#?^m-rPnwUGM&SrS5+V`q|3(P|!@f3La^%t#ZtRk6$j9-8HZd_v8I_ zHy5JI&9xL&mk-uH^$^X=Q#woUhsHR4aF>mjgP|T)4`d~;?aD2ycrb}^cpu3b?W(F! z?{stE{XHbI@w!EGX!7E2Nuj#yof?tXd3{`OxnNW?w?C{lal4P6elD%VvT=X+^TG_} z^tEcG`k@tD3#**;;;~d}XJf=$uN4-tJYJ4(JKElIk6PC%3zdDs-4>VarAe%4mf(IP zuJv&L%p48MrFmJbB8j)vcX&E@yV|uAW#fG;fquK}xz2#%xnww=?hkna|Rvx*DC& zAyiMFV;VkohhpA+mP_TGJ}$sz{#!7V=a#OQk#U;$*1b>F{heg~bNjYQD;&w?iPmrb zTs@6(_dYB+a~D4o%PGX`_AI|s;f(a=xY*o4a%X*O3)ZhuZ@5BjGXi4wLM(# z$&u}tGc1da?S3mGu1xpgX56Nlk=eyXS%d_(37&A8CHro|12ucMwJTb5^qbnn{BJj zW%t03xU3J(WqF#N zIWtoiEYlKp>3nzJaL@H2(%z=|B#YPGdzrBvhk4lRHBc$-A#i|kn{c(n@l z`_<|m(fKh`XhbAM>BH30^yTu9j3lIcL*ZjDxZM2ZUAoTRpYe5%kYH3B_pX*#_hWZa zmj$8v;RB(IciuAPUi=J9#RZmXSn&N^&3je05%S1yV3fXsBnonPwP9J$9$O=xvn z%|l*Wqr>Cml(|wf+qcd3G#l&I?6Om0>|-aIA0Jz7$%}suZp9O79sCF+ujL0E#XYsHR>im3kaX5T6li}CZ`o%Nzfn;x z70H>2pWulU;ok1+_jObHv-44zfozN1x!SGM!sse5Dxb4n^Qk?%7WX~Rx%F5Tp`T!y z1ZjJAI!S*$@s^q0iZI*QWVW8Iq@B=_eLAjApH&v2MX1Q{*UcE?EPsv;Pn6T*$$iYC zjLa_Yci}#f5}(|!VYzv@;=@GvYB;WN(3fg@iFe%0@gH`xviC``!goCmjY>eI7UeoG0NJtLUiTvib5-RQm37j=Xn2?grKR99F_OU%sMGOAA|c z$!dQqUTy;{!aQB+Z*COrq`BI9#SL?wz?H8qGD+M=>Tj#)e36{!`Z1&Qjkr zttnNjI-J?>-Qlz*H}$`#^gx4$gmm)xPJc+AO@_ezV@|{Mf~nwBuF2itAxK zN{{NLJ^3wd;_LPuwR0hX>Wp6x>uF!TXu)YOGqT*pvM^;_fWIoO#8y`7@nYSAba$7p zUHtw?22Gts^HQy?gg)NweZJ~ASAtiuHM`v9!%byAm5R#Gqwq1{6Zkl>6J59@Ua@~L zmDcFhYeJR_<+B~mR9yj%ZgQo2aFjqMpW2-bgF*XSl-cg#<)F;l=_hTy?#GK?M(9bU+|EM%Se)cA znxEs%>x$7msWyTp%`=kq>+zs0qGf+Nd^p#5jalzMUf1hxTQ0+U?5-98znss4_wuUE zpW|kuYu0C<3G;Z}zU%oe7obLJy;F5r-QiPwimR3iVMJ`BnSTQ9-p!0-7CaV(f($!# z<(CFWt9ifzzmw1QX3G~+e9#a&Xx$%FVdMfgjCR%dzT|)XHCv>+=5#)k%6nhE_h21qd7G2XRpZXLj!)s*%>wCNo=%_T<|Hl5 zhre+beb(?7K0b>p<#Sl3RNGai5&qw7oyV&6K(}4r#XTc~a%w8)xRra#IcGh+IUlzF z3)ek?#8OoW3bCj$=JU`PrXGJ+nY8XxRe@*Av*Yr>j8nkLea>MWxv@BDN5?yAk67@b zRpdpsIpr(DrU21)_tJ^;L04oZxu2(0N{4OKXba{NUOECGH~g!(I!r$BIBKnc!d z4=)UcpySae*=>IB<%#3gNrA|?M<+~(qD(|XPXzb~DSzC|;JvY7Z%X|3)*z7@p{sF! 
zsn4N!dn^uF)=r}o_d|b3Q*GT*Z_OZki?sd;0uxTl18yaJrXBNFDLjyxSxYS4G7d6f z_!)_-_l{Mdi4+z}Hbn~x_-V`W*xfI%Y{25Vq_9VwL^Nnde3Dlk<{BL{j*4#-@KQwU zPzx3!xCQ3PZTp+@@`(kgp^tJIXUbarV%?xf#2L|~3x~di_wRov{S4$-n@DUf{b*Io zL$1=I!5lRq`U|!t_Yk0ja-Y{AeGWbk2FeKe7UdN=l)&#cI;LN9Xm+;T%?MWQH(kz} zxQJDiv(62sr?g6H$E!#!IN z>N2zf8&_8uOyYlbAxwon$}17`p67~m4H#h z9F@Cp>&B8&;wu{7vfr_GE4|ufDy14uhGlNtBzlT!K`w#0Sie$?NHGYS9Rz7Yp>tFp zVH7QTpNMeWm;h(7NV9}jc9{0L$!TFesuBr-6!inBE{K0iHeLpbR;+p2d8vjmD5#7Q zICe_)b87SgUYrSNX=-&1^^|i z)~1q!w+}J+>G*kT(EFR%#U2YxZDUmSacezuAoP{@jy*|54!9X!4O>5%GCr?kb7dUA z=j@i=M@)Yk2=bLIahZRz4*|KQhW)JXWKEG)t?6CcLfF`P0CqFcr5*_s=hIjm(@$|J zk)C}cYJ-i9D4?sWCWfti)iQ4S0$Gv*T_a!HDrh2S#urI!J&Q0LVGDE(-JcC^bckC6 z>T{Yo<*3qElnv}aXf#rDIN{jXUAM`@>7IT#^%Z}KlPtb-)4SRr9a!UjpkcCUxewN4 zqIoyg?r00)3krqB7D4jOR2$q40xdnueb|TROFJh*CyFAgpNY(Q^1~Y#hu< z9jSk5G9H@@8{&?RgyH^2 zV`hJsp0V(^JZUp|5oeW5^e{)rZEoR42IZ3kUm?w7?g5ei>^p@WQ#p_aoi@bgqY;`G zz<}(_n<|~$iby<;Xmyl7?F@~4zB06()G>e5cT^Uz9=e1y1VknbN~qrYBaZeAE_VDR z*xG06Il^O#pzUB#!v`@$R zU>Sr(5ly#@vAhmh=?Lr-4yioAhkRvy1AijI~FD`nkGgcMeLqqBb` z%jMRp)^4Vx>%|hXR1zG)j0npxW5#tk??PJzqb@Pw#PYQDQKb9t>(-r}M7L z`m#o-@{vYd=%&;+2Lh*$pD_nB4o6ib%JPPn7FoKZ2`oA@gRsF5!&mcgU$#fHZk0+V zX@>e)CeVC8lGOSZEoB7GS$F5NMxiZ?b!ymaLe~ zPV{ZF3Q(y}o%=+!3;R=`y^5Bu2>@=h!cCaS7-^_8;fF^1J^9Jg25wN4z7*|&xqi4! zmk0$*AEITr6FMnM+dcSYKW;BkW*a0>;@frgMKvE3=3vGV1kk0$g&jbVROWxmBj@+{ z8w#x3T-O5kDO(@Fs&wkj^xAB>oYHSudClLuGyt|(T?I7BSZ{FP;Fu~#!9sKe{gnNR z96{tL4+MEa*wJ%U5UeI}2w<;Ij{a>~*g>nt`Sb0R9Zr{m4v6IiBz0Li-8ERyhJN^k zJf*RjWQC$3OqKoi7ni(it@(eQend}b!%D3zoVf*Om(?d`rvCKh2F*dPCa$3l%UcwF z5m+ru;5aSvaMz8CONbT^Ivqd2EPyWgGC9^gh(7Hj=E+!PYH}-7q8duSOM6ZC$QO*z zg~M+*-4h$qKslD6q6{6>V@Bv%hbc>zPD9(W-bT&TwNb^%qI;(-EBAl3H&Kl>pAjk( zIvuod#BqXii6=8F5Lo*O1S}Syi#HE7p2TVLpdWuCv?j9|)qpUYFX22|LHlv)0@nU` z1!CviJ>zp=UB#hv%Z$4h0Ick)7FU|^#l7Tlf>Nva+QO`^@J~)1pi^$j9`%Q2r3!@e z4n{7q^L8;3{bvUixDtPy0ifE}R(NX+b7XWZZd=Y0nCfi;A}4T9ct8g6R}#vTjxe^& zI}8e!$bb~l@pHSnpGYS$9xXco@1W4#;oh-Wv`V-px%7|NQ@=~1iN~G!ow#qC7p3@2xvp%!54NX(#Nx?L$pd&gr*isi~`XbaJ*vm+ns>AnL|wc~fY~w?q)VdT=7_ zCU1#tvC?{_+)YlVF5=e5Lu+cDRh7omb33vMVDkE9`u+}qIOKW_$JKQWaLXsqqLnQYKVf(aZkTQ8 zpq1>w4by+iB!CsKKD1{RcA739NynT@J~qkltf|Mz5~(y`7a4Q0i>c&j=*O+*!fcgl zNA;mA8Sqs@_Zp!-r~+IYxs(oY=6=P2l!V2_&4|?$Odf_fv#1lM=j=;jOnb^y?yYS$ z#!AxCXHmhUdSW$})2%-6Yg$LYjK}d;}T3z+(h5PAT7XbQ}~R5j&NJI_))VzIY7_PZ14zvhWUF&xDR-Vr<)hW zF4}+Uld`$F`f)7%^8;4ID<}6XwuQyzp|ZBAr^>DJiB7ouVxDS6=$~4A-G@zUu4eTD z2{eIwJ6ItS(u@e4dQ@0W6x}&_m8S_3M0-#)ngg(1czU}(L@jny2q3j2nsmw@t)y_T zBzi!P#pBlF>z*d+Ga=*~Pb zm-;Sz!nLI(^mrA>@@owI9*E5^i3o;ExHgZ;D2I)u{^@mz@?dKR^uzi8Z%AvpEo6)OE z;LPeDb1G}>CS$F++XQYF>h+s~Cf_nr^9(RfKUVfRnxr$5q!-goM+5!>P`>`l#$BxQ zvhDZDHfQY@3rGMiK+(U3J?Zs%zmc^c(nv{veg~KoVfyJIDMRGNI9I{-OmS|Z(Y&IW zqeydk^dkh2O6>GH7hbOm!4%zuPdp4V{7K-IyNt+iZ+uaX34vs_+&P;!T~Gb%I)&ecJt0+{W3`2zaSpi% zIg8y$fHzn2fFqYvk%6QvRoS#k(j1|GXDeNrR&TSOcwT(8P8%fAB)}qD^1YoFwm8xo z4&*j<0F@pGIa{Vrj?f=vH^`7dWu9p^Phf42EUppXEwekTcrCTLftub#QGPR%y>=G@ zwueYMhgDru?S{tfbxZ!pcV}2%W$Io(KZvW9M?Np&gN2FHEDdFS+c<&4NT6^j*HMf-9?93z>h z)#tBgF~IKls))GtdN-=_-(3fPs64GE{8u7kgOhcL!RHWgvLtUek&iX`$lJo*`r_mt z3?J(SFGGRcW|EPF_HEdYGq0~6(Sgi(Wjjs#Bl^Li_^lx!3P~UJ(xvBt&hORET&E7wzDVxaDgiq(xn8 z@vX|9`3M=@GW3cQx|i~Q%2K!*N8;2>%F6V&O7P!d`dqS>qliXUraTMD%if`icToa<1)dNJb`!<8Man=Rzkv>v~X7gQRkyFFrqwz)_A{6L)A}fD*k2sG_GeLZrH1@_e^QW2Wgi9 zT+c18D)o0ViZ0dFUCh;qQ65MO#i;|Y`{67jXf4;_y3leb6TC@~Da(%}e^<6wL^rwQ z?BM~oNm;MeHhPG*vga}-e?CumTML5@=t<8i{%%}G0lFK1J_9~*eCw=~bWNbee0`Qs zQJ+)W;Mxpm$c9DPCSdJ0N<~Drkepst95Kj(zG;3Y*YyJ5TzddkmFZP`qeq#KIfq?pJL;}BSj{RNR)fLM1-p4%S!_cNG%4R*$rlu#*$$L> zHrfU@gCU21(G$sY($MCi`M&9+G~F6$gjub0s7|v7AYO3srHmRw9-*zVnm9lz6-7c& 
zzo?j$ljlrJnKLy-C!HWb42oNshCKkD>4i4pa)QM?`Ns6CYS-rG{oY0=e$+#uF>$x| zP-wbR0NZ0E#T@w|7H-C&4k!dCz*+Ts7TBYyS1pTwvV!vg&nTL5)PQWoWFX-Bu`bO+ zIZfIGQ+?G@(O(Z|T2M9aGsZ6s!HO~+4k?>fZQ6>X2;`%H#NJ=*iwM~7yrEgBctDZ} z&eDovofy<~e+{4#LSWlwZ(iehfK+0~zixXWY&NKrxB}&)I&(!m9>%z@>7z|Z<`9Og zutL~>HT(g;>I}Sot}b1{P=)XAe&kgN_(&(@nvjclQdyJHO*EqGc}k(t`gwh44lvEh ze&;#Xr41J^-D4t0a^e^GtjTxmeKg2gF}w*v7jmG-Dsw-I>hHcQj!&BiZ*jr1b_BTJ zH|S(F8SKHYm`X{`Z?aPWm>;azj_q8mwWgG4gB%$0hq?cSS zTtmku2*Q6+05&nbG38vKxOul+-2hm z4P>~R2K|`)H#^udm}N3ry$3ZCDg_)LJ1*Onem$0hA{0K>(q0R??WXg}*~eMHxTa?Y zO6)TPZf3O`JC5F#*eg{QIX2^5Jd1#pf2!2wMT3=Ze3Rp|<(9rUn18c_nQ? zzstsj<~u^~6eo6nOt)~2LpgO}Jb#f(Q$s`?WqS4vs!2vc*mzric-WH>CWi2T-$7#N zYk^sfMdiin%MU0C}EodLBQCvk`eG>s8_ z2QTEP1pEeo5+v+Q@L7t`{KSsonbeu!?vpasY;U@}Dj4#Qg2YD2*@oHYpYTnw0yA^N zO3aLyS-|Jk+y^83aMP_|IWs$dj;rPO$f6syc7O}oV7DQ2Yab$b(~P_z!Va#s92Cz- zyW`|E?O*vNAQNrKR@eNqS-d6_&yLMCuipUHI0+U1I03H8kLgTaYQv+LFPOg(UAMnEL0L{YchT zOr{nUWjfa=nd2H7$$C4o|Hf$ZEjk*!)=bH_8bt0|QnNY4?sq2$Vx7GcZfHu=buL}) zt#NRF5Fu;SZKh3yJ(&IAHiYHQ6cnBi;pX|w2XfVSx|XXU-$kK+x-C&TQ((Y9>+*--AbjC zGXjFHI+tHo!&|GTi6MCOl!=QO%fS}EeHy0V48rR#A7-jbQ=Ep|47BNvQiKgVjsmxY zQ7RH0(mPhl5baWb@a)aU*@8w62y7uI%kPg%JHj0cvK$c;gzc_ye2Z})p1VuCR$8G` zl$E=of_V%*{9;RQFjjE-Me=%4P5k^U9O#f9at8WDQm0;nxw#d`M_3~*(7Ah$%sZdt zkFxcLyW3T0lu&SeFLF;lX1^Hm(@0}fs75n+NR(_&OFownpiD+S(@a7S#WRAM@SK2jGY z<7k=Stp=E3GB2?tVcLCEYf(>Ar0--C)noE8x_LR!W(TGvi$c$9wI%5Chl+;u@7YBq z?MkTp1~BV?&e&qBUpM?8 zD%SR2>Tl;<{yD+Lf4ZC6EpTHD%_ENfSli(ra2@@T*z!AHB7ntc0{sa%4Cf!7{ZD&s zGswtf?3hc%@mKGkMR?m8hQW-uCCL&g{(=$y#^?Nhhw1)T+DVWHPyQ}_hhezjuij@V z^&vXJoP_`K=%@bQ-2Ul`k@!uxH*Dr46#m52`#-bD-wQcMb{(@%B^W9hhEb`%qC)p? zQaUK}KX}g&^Dmlc{O2e#{${~*frTs12sh=Q=9o+O|D!Md8zjp+#GH-dkvI%v{Bw9q zfAxfaUQ*E7TJ8)r|8gT<V-l#a}-pery(17re3 ziVF(kqv%1Ae)5G*wr!UsAU=$*{TPMnLf`#=IKfr9_6nP*ol=}KKG&(`Ap!xNnQArK zY!~=deF$`V%k-{bnOmn00_1D2zs^2)tMaRAnwuX3>6N8I3g-J8C1=$=!VvSVu1m4A zeuF9joJRHrueaZ?#mTs8WFM{F($xurYLg4G%zb5U{(OI%o7Y#>mzjo9uPOJ9CjXv) zw6x!?ZkJxve)#Eb-UZbv_dc^ z=o_gCMOKs+V{l`cO}p}3%{3xVNUa-B_-$kFN}(F)!S<%@ez|PGsRVx^MgVXN)yEuI zcl4I1DQ9zo#NwXIM9)euUn+?(nvuVMAX$>EP`*m6p^xwxfAeqzi9ylB$MTX7tg1la zGet=`Cy1TvOuDM34^t48syTS}&B4wfxjTjc>U;Hxkwb$5bw*m0+k`EGfPc|D}wS4h> zba!&@vC<2|~yyz==TBrn_|PqL*# zb&Or60in3!1+o~!WPbZz(i;7LfR_1pG&lR|oa(Asqgs2a(Ge%qNEB3mmS8_{S)Cii zdVS}4h>MWf8q^cIm+`PvB-K(=ff&0X*GA4jn9b9J9#Ote_0 z{Xl1{1?B~0Ix1yM+~$QQ!QZRe7ZDjHqK!FODSv!JtMr$mGIfDR;c%!1v;=;-SY*6& z8*C6Yz>$QVJMozl3ZsgD%x}Zk4X1w(!xu0=o5TcGO6fi=(Qwv#msVcgX2nF5l$R+y z=~)-XKJPe7Jm}H;{18ydV&0x6x~U@a+oyu4&L1HF-1X?k$fF?g9{Na8q@kB^hHlrS zHYm?ubqMgn58`>>sO6L|SrCm0_to{#dS*=8hJJClUs7N7^Hz+1_d6#3%q_obSZwWu zHWok3sHDrGl9o)qP>p*CWA4f>wD_=FhAZ+Xfig4Txc zSKr)gh*SbN8g9tZBcq&EjqIy7vm_whmpI~U`nm;!tQZTPwPO~*qnJw*iW3Myt=CuL zh}^zvkw;SL zq)>DdCES7EN49Db9i72e4NftjxV=^%D82Ffu$`U|IHmjR<4*=_Rg6VI>@O-&uKm)MIQKM`ZUFDTI(AnBM zAMvXZv{t6z!!%W)tc^y>aX1V)LcZeMP$GjsXhYewTVzYL8-59OECENE%|Di3t)-j3htJaImpLjxwX*N-_4C92w&y3mEP>eh^x4JLh@S34QIv;{ z=t{u~qd8#OLmBKwN#af!KeJC-g)_^G4<~L|yiSl(E0i4M&ID&up|)rM@dt2uOuC$b zp0IR5UihHo0;Si2`HFNxqk=W%;$14*_SK_*Sd2tGGGj^`qSemoBzD)U%E3 zUmsX`Ga~HUr8TZ{!GyuL%`B%pbj=Q<7p**Z%^FWI4h;$Y_3F`j+z+KDAx!~ z4Ln(xH}S(TC!15#OS=YAx$nlC6*9WH@n1DxH)@jjOsET*hq?B^0N($Ma8G%&1vg=x z);FKLA!|%7%@RaylqN93kql!>XI-e2Ryo12xu=^7a|b9W9VY1}`)DcxyAHR1?=~I& z)?{#Tn8C?H4DuthJuiPoPZc#A#pX z&LGeSI=Oal=-SYI@PMVFu0c#=@DAhBTPjuvA`0=FQl1;k<8ge$s5EGNQ-3lWn0;lz zBy|kndX0W7aXIaGBWQonV@?@=^bFar$RQN*=@j~@fmE4O`%e5S3e<6QTvwZ-G?Ef- zF2uQY8S6Qzn@IhXCD6QRxT)J*g{n=a_u!tuYdWA2Aet~|V>Wc-Yh;+vrx9xyGlCUu zyk>ImqIW?WMM15wd|^|nenncDMXHlXzOd|PYs|!cRORinz)cKZ)s>sW{?&7 
z5uU9fvv+sNzKLV&2Lpe9=@4b#k483qeH0kxB!ZUhJ$a7531!_#+b@MzX5ieXSXBr-ANxQCjgrD5(iG4Gq&fs&doZwnO?1+&vwyN;VbLJH zN6R*s8WhIQ%I#sAO zNYKISu+R?8iIK>|>3J$-n<44=4e946j5-qWFOYKma*}a(-cQn#HO%?D@I>s!)tYSv zzZ#8G$<~BWkUkNA8oDGbaNUrdgJ#r?AU+4k@#Le0sy95Sh|UF=yGn!MT;)MLgdYytO`n`}4x`s;^?Q>Hi8U>z4by*FH2Jd^8N~rW()OdxWRh3?} zXIu?0);9}NOH(yNo--4P02&!~9Hxg@=3`_+eJ2M2fFq%QdBkBT5&08FsN1j6*POUM z6=)*N#p-5$*eb<>L|7|za-}puPCPq2De$YoVr@eh0`K1EA&BI9J&oC93YVNrry)S%Q4?NXn>dP(lzQw5<=MK$Uhz5at{tpLV7&y z5&ezRrU*PxMK8My^Q1TlWMx^c7e6RPmHd}v33>YngfJCrf^^;0d2O8sXsx(xpKlXn z&*bjZenA9qV{rY8NO6UO-U3=9N~ksZ*?I=!73SW5;|uvA@zKS?U_lGOJpL8O>s_pB zP7Pta=9uieP6H-8Tj3gbgEmMiya&gp0&=A_4>&&`uRZK=B+!Pt=bI9KhGXVK@;Dh? zW~`erC)?J~;nv46e9=RLG5FX9x#@J|@5pk=Wit!*)P4&wMOn}Af!Bkkfx6%qf@wtN zQ!*ESZqoYjPEx^Q#)6oN)ZWYDkDuK3;nkj?cVZygnnVJr$6O|iCwX-bYfz_%L_*AM zlx&&Mrfe<0q|{9)<6oizX&EpCOqPK|^Hzjb){MmQ>R8L&*Pr0u;ICjh)Pnmj1ny|7 z<}Opb)GPd}q5?#;f2)Cz(Usa=GOJh3n&^efyro z)V~So(}^CK1kt0XJ(qb}Za{2<(HCn#e%y+hb(t5C9V0W_KxdZjF2Qczc)mxQ0?rhX z@uew~7DXCt^;F4}o;3A6@0S=nI2`dDj=g7ESp_=qV3)EEu+tjzd`H>a(LiE9Yoa#H!K2{ac5#PyZG@}@?LC-^G{3R-jX7fGVs{{_T>p6-d~D zpfITD!sa!h`oHJ_%vp9Y87nQ$APL0Tv~Te-2B|A3-D}Uy*&a6at?)%jmky&Qz$f+) z;S1%eg%;fqAQCiFdZv3yGp~)YQoGN8^fTYb#9#r5Eyy=m~d;VY%c@qnAhG zlGhaDm!yfiFTdWI(obb9<}P+FE@rJkoK;c?Kq6J-DIUr1!T3FQd7UmRFapzudaDt3 zUcdAvd4+N45N`c{jct{-D9LrO8lFer-&28?z-Z<~5wBwArn&{vMu~E&x ziNKF~pd-1-U;1dx32_I*XrsrfHiKMbQ`C%cx&2rpC}!c^F0EQkb2@@O*%bhp$4Hjj zTMmy;eKH%!QjyIWg1*o@HWB3$@q*`t1^hTQOG^HiJlBPPir1;TdyJD;Q216{sjiy6 z$j?hBH0zc@kv_L#%D;qkT?Z^GqcKGGYOj3e0-hWIY|XxwzqpaVR5ERpLZS5%T|#8E zt%w6@>WKAY(bPvI-$nxeGUkuTI`LFrq{KQ{jIDol_Y2hv! zpTBWz)(NCOT8X2vgx0FDzm>8is8^*5T3kLhA0{{5C?F9Ug=k=@ zA6nm693fm#gNhi1vh9$o1!GZT2A}&f({iWw2g)OV3SgCaaj-cLW588(o~oFHJn3&ioy8@≧M`(%ooq_3Sv7vD)* z_5^%??V;MBM4MlR8?3lOI8qk08EBP6lE`<6n#@t(rtE* z#QV!_RNRbX+?pw~5BZ#6pDhXCg zVAxMVkf$&*M@d_Wv};z-XBeDznT?5A5(i0?P_pNUAKafG%s+BlhY6j?2ybHxhKkMQ z%(yHawUSXldx(9zjzNk7Hs_Y4tEw;%q6UXY;eO}@t~QlWSf(F$5%%z`bwkHYI+2``U*`skFNmVm451zso~7T3T*Eu-2v%R0A`Bff|tF|vwBYk z4co(Tk$`@S)KRv$k3xnIL1VSEM3L{$+i~%P2Xbw+{~DV7o#bYMot9l7S)*fggF0b9 zg%li@9)rPq<*BD0!=|MWj265u^s39bc}Nxz-Grlf%%QHE*<-F@`61ERn0GduqXijUa9me zw{(j6J(jo{jZC)#aD7qTB(5Id{KBae{3AXgSKGv``*}M=n+tfis|hcd2A$I5Q~24V z!9HJZr;~)m#`ZB%!4_Lz(f_J!7+0=?&-@%n!^5w=q!$TB1vCl4pthC81^xoN&S+;--~v`pY{)Ud@28=e*uqGlfUb+;w^w^U=#H>ju7->`kFH^y1H#_mD4 zSH|Rr8su*@l_!?=^yY~W+$GsUAkz8!qb&+=&&Zl`PN<;nVVq9A2fE)<{Sv&i`6-SL zV^kC<=NteZcvjU-^tOdPn{|*2s*KbE<`$~)!@4;7+Cl4oK>ND*Qp-{p(f_9GP1+S@ zny%3wvc463y}<+!~paJU+ZjvhBtXhQw{07tgN?~4Vqh-y*1 zRA(ptJk-yjz*F)#t*#}`_an15w0ETw?Q`3f^Kp7uHeDTQrg`-@27Iu6y*8|s8kW?6 z_~E&Vb(ub{m>+pEaPL)?P_8kEO12VFVG|vktVeIbZ~NB zanTZgJ?X>S)jQ_pn}PN5Y2GsL^*Vnnhk>Wyco=JBG|liyvCI&|DDg|R^rE}H6rVic zd&pDbY-)}WF6}GyPp|P@pSvcL*r^M%55>#^CzqdBqj-_r5qWy*h8t_GNGEB+?|M@j ztgt44jRnnrDsEaUCDhq!;;iAiu#=P%Bq?wY6R70< zd2$%N6V?X!_t4mUBB(x&|7tz;tqN>N=1U{LI|99l{Bg12QlE)s7DQ5JDt=s}#R-m0 z^bpk3(?+djX*t7|f){8DU%9MJgbUk%X*w1sFF9Tf>Loh*?5>t}V8zRNDrV_!obDHY z!s@PW{&AYOxH3gVCiQ_72C6yUM{cNi?i6v4h!Fz1*Hz6jo&LeJ18L;0*|}VEXvUUFfu5fB92b@a(=2N$^BH^iWV*2)qD9Q{-Q?txJi3|LWfxb< zi<5F%7xO89B)4%1cR{wYN!N57k=BZTWUSkVE$3{)Y;7a^4E~j?)$C?IQt!^=F1xz3 z{lUdz>67aMnL=d~+*R!%-^w;qk7aDMQ&5zm0fpw|v*ISDSY9k$CI2gpJbF`PL*&)p~g80z41uYhuO=WAGBr^J-n>Uz=a z3wm3Zxg>}~X)PE3%Cx~*FYEDYNVMs=0`Dpy4{YWhYUy3pFUNJMAHpp*n0y2(e^2_8 zEkBssi+>meS?DZBc*&!Gn|!CX@K}&t&YI#=%9lpDlCsOTtKhhRVmnG(O(f;_+FYHWzKT>+)GbdHHyWbCC`&M!7@d)wW?y zem?0sT#J{I-J=vk!Q9^wMJ#Kwp;zOsLMh;UtB4ij{MCuiE!($uMC0D$Lf=V zXp%hxKk{Hi%)sRqqfym=<&}@IABzoWZS_ceZkLk1yo1T}2CgTFu=S(2`AREswv+bi zrFt1YI9No_v%VH|9q`_|EM1>|{2cIYkD56;k-hQ;-hAOZ*VChLp)MCf^@|M=iSr)C 
z=x{*U+|5-DYh`@hjPjtoozttJgGYLXdan09ebvkK9kkV&9my+y`x&C+WNeQ^sXR)K zpOWIlbdctQcggz#n{7P1cu(c@5YpoUb5mUKTORH@IE!L-jGB~9eP)=PC!=P@^ZHVo^^sS}a(PNtdRgbZz(mxW60Mwy z+l_n4g_Jf~!MaX=9k=%R(jvod_(Wrd+mP3@jlhzf%1(m!2^LNr#joqoICq~u9In?Z zY02#*bTo~OcmJ^Ut2J-SaDS_GAkyX2cMASml%IKwwJBB9>@g~r!R#v!k@lG~y(DQ< z_?A2OjNg?oZxwwqTs3=kVr@Z@Bt<|~jbwG?q_h)#kZ*5)Ta)|Vb!J!kD3sP{^F_O( z6Qhxdk+6aSmS=|MF6}w~`Ut!EeWOkmp_5CGxr15fUap>&B_B=T*O$Y{&o~FyRZjWA zv9h2PtN9uXl_~4TX>i}YXU{Yn^ONM{NgzCMXm(y*{ z933u`GO(?G)PMSWe>AehBwB~Z;qk&}HmP22KV7K`jw zf=#s2B~toS4bEEBi^^KjVflpgf%h3~bzC((;`adX^rElHOC8P|H4*%iY>v=`b8>zq zvm$o2^kGl0QZjw1zcbQCYdLlw^AZ-y>scQQojwEK_sN zr)o1~6sOOOP}JGC@loJpK9YKng_DtOQYH&@@;U=qoCg+#*#PZqU#w9na<1a4tJ`OX8;vsAZ0m;VM1_e}G)AYfDe+ z{58+S^R@1@rD}%_wrJk!xH#9&at!WIMwD#-gT%@d?J*uGhpDT!wg;+ko;sPmJu*4V z@ES8mP+0qfE2g|2yz%YKm!||hYLG5hSuF{FzbFC~pBd`qT9yB}9j@99lb=m|WS(r_ zZZ9;{*9se=6>BP+zv7{NSk9exh`ghr{K|``9oCo0wLfsCW4cFVbLz;TLi9==93sOR zhpIvC%XXd0fCz}{#KHb*s_C5inv!+2{IZP0D`KcizM&g&_!x`gq?@G9C}Rh2d2kPZ zqS9!QUW=OFSi`IQhe_m5ts#?}=3ik!mh{V>Z`y6hZ-egX_tcHm<>aUDfmT+O?6{$jRhCnU;OoNs3K(mF@80Z14~px{Y3ivi+VhFMN5x~VU#d}62UIq_ zsd0_H#jG4QJ$lG<4OFs(-LuvezcfaQH=iDQ9`gg^C^R}L*%4xs>uSGsV&*LgviFqF z+1rVGBlM~4klYPPcgmouD@KMJ0CBP z#53>6G+6y6cquW-6uu7G1M%C+=a#nW$4b$?{$hL%uSK}s>M48x#iuQQdoiY-S*zfF zp2p9Jtkj;d_%c@t4QGU^(X*c9Drw$o(MPku_{BYr zelo6ctmZ`7D8TeOPI{aEZBw(tT9fh>jQ3-0NSywB^$XJqnW7(-*KDU7y7W_Yuh_9P zqma;CPCScO!TBgtJ04|!iU;O0MLl;6uM^c-jUQL1##csaMen(1&c-=>kxz~8j9TiC zTcsge4r8KJE}&>?R{y-FGfmu^j}tPtDUyh6SzZ>C;radvr-WgLx>yfS8b5|}9t(Hd z4uFz-x{*+>*j*m4Gp?d72$kw%uQ$Irc4p6Z#`EOon$N+5emgvW&fPR4pZ3gEHdjG8 zea!sA&*tvYESaOg*J zV>-gt&KocarK1}4d1qj=<@!x1ph{as)RvHMIfa#Op?~qxJ`bQ~Isp%DLp*6Wj!fS< zRCvpksYWdh=OU_qZ<~CU%GEczmndHN<0W|~i74H@fPmgdi}#8Q>dee;c6q;wm26kJ7Mz%ci=L-Qqc!7HCYp3C9_^_rd-1Rz zC9h^_ImvG(M#<-Tjp$>Nk+|}LGZr}pq}_0lww5@Mf~JgrT551)%)ZO&pu>-q-O6kD zaje^|WJ=fIIQ--RAM}cOB@t8cjSWtO$k^TOA`tF-+(PzJekA#`PmEDPJ@C@4RP^i| zHH(4Rkv@ND|C|_z4z_m>3y09?t3sznV3!9!KQc=HxSYC6d=ffU^R=m*O zTA~2uGij55dVOW-r>?_{xE%`h%q7&ZxtN9_A6cvvvF}v$K&*Ve#V)efyt$qP?_xg# zDt}eioQ~`KdLp`qhNYXn!RnrT4&%!7`V^m6YDLKe#LI>DweIy+lX%V^%{Z=$jS?d8 zb2VN(r@j)i;H6%~{hX`ezei4h{Cy1}_vs!@7iwv$HTksmj zjK!aSdEd1ji35{QFB8AHmm3rQUBLo-}6g1`R)h7%rG|$OmO($-}OiwoCC27*U9}o zsw4k7&qN=qqlu1cky=nXG3)8wos#$KlxXpPL_O=`j0Lu@0>kcEp%ZVP&b%IqOhoH! 
zyC9pVb+mM5h+^+FLgCq$Rh-%?ca3$^x$;KVOBL~a*8xm>==6(7Esi?dXU2!P((=Iv+NFO`B2p^kZ$Rcb*5kABBgPReD=K~-0 z8wgP?FEZf8vItMbIDe!@qvx!d;&2b5!4#w3`;?#A;)I)GTW%=A!8sUWTwjfUyRHvV z7GM(s9y0haL;6^5a)EsoV<3`37J#BufSr++qIEWuFvp*qKMRQn-!51|cuaR0mv^JM#B9Ygs=*ny`;MJ1^lsdtS8r|^Ps@wt(aE-d0@6R*+wp@g z31t?@u!Z{Z43E;wU9(`l@kW*H zyN5Gl#8gX4{Ot*d+lQUsR&Lx(u`;7rdW6i0Q5f@HpF5H;qG{%m?r6oskx9dIkdi#G zvq%@*qw{=?TrUP!P3rA`r$b}$#Pfz$x8()HcX3)Th5rc#s}q3@ZGS6A@lqa5!TN|o zUlB)Ug|~qJ*tG1R}|XHxkU5#y%?+*JR~L)MaJT!^KyL zyV`I9lrKL2dMH&~^f|sy2#RI*?#?+n9fIbqp>r7N7-MohH}AH8>a9Ayjp&VTgObBG z1Q{2<8s^xX>PMR?MbF4E3GTesy*KlCY}Hv5?yR|=AK?emRYs8zXUq$MK@PM1l)(Ww zDNCXw8>F^6-aX|A-I%}KneZY$=JFmqarx36PMGZe$x1YG*P@SuL=;xMKooFeRSUz; zP2-XlEuB|*C@dj=urT>-70{EJXx%Thrp9DZHZfg`-dU2f_Y>)mNxcbcMJHW#cw<>( zaO#gfm8YPNUvn!w&)Aj0Ic{Oo-$A*sTnXP?LqG4=@!)pl$Kshq5x7Iz!?AyE*Mx|+ z$nLECu|A)N!Q!`P?i`|C#lBUYjUy1a^&Q+Jc_o&q(duY_;PFdq6>`q$AgD#Iejnty zRpa%f*XUZ_K~G_ufABq}lkIhCyWq_MYp7O5qp9lQqtJRk-_KH2R;j(3%~=(;Ic@EW zVT+nt=G=9QCb5ICrITSo6ezoeZ`*)Qh~t!SbTx^cI39~@=E^-}s$|)6)we(Lm|Ui2 z))m)|q*jW5eAm~{o^26Yw!R+Gsm&FP#>DongoqxB8n(L5(eTFj@a8Jv;Upgn0$r4O zc1kiL&M3mTNyn8C?3g`7Er`x!%Lo(CJ*N@UK6!=*elmz+g#d4-bjjd&M}3V`Dd?RhhA7`gVG0!f8ks;e43rgB{*B zi0SE;6o4$_erh*aiIvCXr`4vmH+KuqYH(j3{#~1W3k|n=6R)7rB1D?Dd(Ylj)pYN1 zJG`3YoQ;)r*R$(AX`1`t=HhmGXHz2)MK1?wOo(ywhJ{e)OZkaUn{oo>#V?w4j)nVw z2i`o>mjd(E>i!Lx@sm}lc~0mj9VV>hOx>}sMOL@l*W1BxJliG8FiCXj`l!rQs4ilF zGZ(ZPsle@&qm$<-;Zqx3rhSXkrsH>vBH&08V!uC9d%GA3@uLAycVIFy`-3R}w zY1MhYIw}7eo@^ZClR?9GmP|mKCjX;<-9%!@3}lyWPJW*6xo_<2xfJR{C2n3Rnn{wW z%XuIq!n{YkZ!DO1&D!TrSaFob@&@)gZP8RGE5BAfw_FEUFZ3VYPrD@JLrkhlg&oY8Zjq-r?Zm5 zk*9GajpYH^dE;KNYBZoW$5S|v;zR6#QJlh1I+j7=KlnHDFPr{;GB%0V`_M8M(mLkx zV?*}~Cr%D|EryUoA zGwb{AluL@|+cbdebgaW}y}s*zv)SBk<_xpXWk{I>@`O`}!D}8m${AlvU_CAbPKkWT z(%k6MH0NdW^90g~5=-)R9@EwH%HT#2eKI&_bbzh#){}0%8|Tk0;JVwknPJk*F-*LN2_d$D5S{_=*bO%{~aKXVx9mnULBHy0&Qp&XRJ!&#L-*?882L zac|KPK=(XN8ukyxU0suvAjvD}H59#3n4tK1t6@&UDPGrb?Xczf1WSanfX?$EWd5;N?)mr zN{%qQpUQ!DOuX%#l4YJUZ?eK%3n?v5-+;u`DarEw7^>7(^Cb%3uI6A;+OXn{5}|T z(TSc^tQ)0&eND4Z{vyQ<4yy*Nfr-&duAwdlaV@mlxuU}N{^{CJ-rbgr{>S_jhE$jH zHO*dV!c*{S*Zo_J56;Z~sf82G4Aos8EUvYZ2M+S^;56ZM(^(8tJfPa76A!@%*A%WkiBLG0*JM~0O_iofa^#!az z_5eTt&H$i=-*`LScpI&4hgVd{9+8bJNO%AW7!!mGe>R}505m`-fc&3}5^=*?23`X&0EB=i0sFd3xMr?m z(Qv35TcJj)zN7|NQKhV@D6ev_)cYF0_i{v=?C)!ZMJNendej}b^zD9%<2*p% z$S-KQol<@6deZCEgC*}u1%jG;cX%kfYH*Oj0unt&AsnnAq9-;&v@fpOA#rYL-s3bape6O>jqdtd{vV|w5N>4(WlFEie zxP>QS{^CSZs?_i8LIyUS3wF3DE1f7MEvq2qm6DD$c0S40kf=b0GY1HT#Wlamf2j!P zM%b8A-sVh(j3=!HmMl6qX8-|vCEkT5fMx*fyx-3#?Dqp~UXm?d;%)vUTl@)xGc<5; z*x3587Ae%$0t#$>AaM%ys(=WD0AR=wD-EcERrmfZd?6J8epQ#W*X!MG|ABvTKM>Bj zp`SGFQF*t9p;bZ6_HAOYOwongcMzC}>* zt2*-mu+ttmDO`)-kgb3Np{#*%)Cd{KT%pt7PJIC$1cBfRq&EQLj-Q?He-OlA=Q}9u z+ylZ|uD*B={>1bi&j5SIJ$gj0_dZDcVp)++HaFD^04!1-0$yf+vTR3?1AG-y4A^4#TMc)=ZE1l1+B`jde{KC^gIX@aT6bvf zzwXTpImfSpu(``TFE0)?bc$aH0(LW_E-Jkmb5ugx+7yv2?!J9wfohF)%s+Ug!cZ<` zQ(T!`b<&n4=`-@tjth0i6esM8}o&eLMKBu(+)BRp3*>NG*5UH$4 z1i>sopaZ!m1JKY2e-hUa*iuT*DN6V6yhNT)jVxHtgj~{!bz}k%pAOL{GQJbUF!&H; z0Vgb}0MfgCjBh;7yd>MQCs^s>RQtN-N6(^nV#Y5`@(k3LU`Bu^P& zOts`0yOTqOJ~xgZ#(E9fyYSH7QIX4}JK8`9MPVe3e@=A-yNKkO?DQs#)a@PJ+lQ_= z^eS7P9=HmY3hEYaqi3_thD#9nZnNV#+2f4@lE3RLc?R;^=2IRIJDyu*4r}Fmzv)q5 z!r(_^?eX^Ipg;G#$N%n67|l^=dQpPb=xCLbk{)fUY#K&tZnkFMM3XyrkT1lvg(+IY z6D)>#e|z4y2ri1L$QkltkuAMkuWzp%XXrzaK*eyGfdU}fab|HKWp|u?`G3dyZqq{k z(VHdSwb?3{MIhUJAhPmb?I~1rC0BF>LNv*hJjp*iKNF-3##vH!T`PU#+5Mwy?SJc9 ze*M*T>yJJD##s&iUIWg|hqGmcooQQSnMy2#fAg=-y(Gj?AGQBopZ{npvp|31&--#!{j1r(KBhUr!a-d9wc$srhL~^AOWw#<+Beab<%FG9 zf0hd7rrSj#tUvZWRUp=^v=Djlva0l9uSKE2W)SW9;xy@5sZngQBPrOx{n1YeamJbe 
zUJY>e{I?A1H~-;u6_y=$JgIDH@RMg;SJ-j*qsMps^ph9Y^kf#{PhQ-AE?rkDQT{W}CPji7YTnAgR+(f{| zD{PvlWzn_NxHHcKjkTG8U$89(h_d|@niu>+KyZQUW}y!>shIsvSgZ)9if=VI<*cgh zih-qEK?Y39FHOL1<-_jAql%ecW=Tio!+>4!$N@%2>yYqk81Q9t4x6&x`3L1he+yQI z9cOR@`-1`)sBfxuY= z(_buvBG_m_U&CIhcD$rNwazC0M{a?I92=?JV3$U@Jj(mq)ilS7vY820c6cEwDpim7 zqYl3WJQ|`kd%|3_{4#64C*(qRf6uddCis8i!9b6z;D3HMdMmDKZ(ws6dXW^-Rm_8dhTt0M#Skuafk3`UD4=Z-ocL3T3Bge8>oJy>f*06*gtboT_Op778S}TxvFqAy! zIvJczZWegYVLew!b#1|ee_51~^Mbk8pgmtE#KC1Rg}MT6-t*-{{>hgC{PM(C^X<88 zH4AjoOYl?Ye(N0k8(XRWCr`*#`bU$UQ2mX=FXli1u(&Hvf8$-92U!MVi1+%` zdmytLwSvMQRN}Cq+Eb&_wIKj(Z^NopZp+oP<`+M8;svM>JZZ2$6)xX~7d2P>m zd+p}IHT}i;Ui@0|*%OR8Xzzjy!J-Si&K89)&!PGA^v#Eeu=3vR z;O%xGk^Z%VllsE^v-bbgFuOl#R$jk6^EV%UZT3f>S7EOef9Y4}{PNN9C&zqa;mhfJ zFXf9ZAk10S=<3p>LM;SBe6NkIkzg^)aF$~kdl)TE%#~y*ZImo8!yPPG=5d2y;YFtO zfPMuq>sz5J-b)z{7uJ;v!QuVCAPe%vb|Wq4k;MaUqq$ev!~Qt;fs zw8%QBnZUNges)h`lYFd(6$ zofpTq=X1K9J=hklw(Ibmpo!DSmRL;yj{tn>Q2Ey5?zfivY7qzjlUs^V`VYKp;px{7 zdz>90f0qW|IQvs?ZSr4jDi^|xpgahxY_V8X?Uqm3vJE(=C?(rPA}`-Hmh`|e5KJD6 zY*Co(GN_!-i7B=mB4sr%cx zhk*ez(pU@2+;Jw6Ll22NA9Nq${s^g`S<)MEf5x)6$Cm)W5rB$9>tL|6{L8QD1lfPB zdHL2!y;;aR|9scIYnZ>dng8Urxi1NhLe#=bS=JpVR{7 zTuBbnN{8$svo?_Pez*8^OoafyaEJKaMvBjSBLASVDxd7L!Mh?&PUEzj^gQ*F#6NC{deqput>YFj|et`e+5e4s;hS^*rNKb|93+d^_?aw4dnf>vE+(cq7SNt-g)eRPaL9$Nfrq;af5Vi+ z_zJ76iQoQ3_#N?uYHOTu`B0Y5_?yhXf)A@AL1 z$xeG=rhRRN7iq>6u4!9dd0$W6k}sAjFX%yH$s=?v$szfhIkMWpj-jY>mVdlU>&ziCr$m_fKl}wgCxp?5MBeDkJ zh8621u+)-?6u=%}IH?#@&Powiq%2#5SbE}*xLT?kAOp{7&X@F^#}*Jdf3)Gch@|4; zD==y~9=Foo_k$nejB&L0jf*|sE~NC6Z@+j5ezep-8uQP(etu5-AH9`+d*$DCe{=J1 zJ@nNmJ8$ki7nju8Ukv_W>|cGduTI_TrhnCfH2byPH(tNJaqg#Hlz-}kLTn|fn5>fS z;+nt9cuYlzBg`@dTkKj+e;yjHfjkRl&f;~hbVIvF2A0N2Sa84tN|BcEEW&|@pfCiR z_YBrmZFSh>$}QN9Im#}G#HF!$uNAZnr}DCJ24G^PAom`&i57lz*K~FlmjiV(APT*$ zs##Usn6k|fWj7WA8+!baI=nbfL-qirDjXzFk(I|WXsZ$P^LbFpf3ovj>`NANmhCa* z-C|Hjif$eE$&159fJ5Yvw^;QuNrgj#R{kZ}BO!s&(5z79@$eMc(9t$z$(3%R`DZs% z%843m^p5L%$F*2|aSgvQ{N=iTX~Ox(=3h?S>oqRj^V=`i{qfnauB_4%`o+Kx-hX`m zTi5-U&%W0x#7hCThxrIe z#f1*-aTS+?3cXeY@cBbD# zF<5Lhgi47%h>F79&MZN+U>evXhVIwSxcJ%`uJU(ZLa)>9CvIpKE#IETuRf=_KXu6u zuh8PJ&33&m{vqAm{Ydk zA;95ex0oWEqb36x!#SmRjvhHKtYJZ`JVLwn~FTH5&=Y&#eI@bmu*R%2l6sZSGZQ?JFD#P z(y|6!1n%TEZ`uZ^#>5$_w zW{ZJSe{EFOw(KD|`W#O`lxloKXGzgEZ!83%waoW>b+_s{d`6xtne&_7ES_U=YVO)k zD3B+!!z$wX;t!=H`$hhLz;E<}-@_Hm%#Pn=$FJY;-~9Lg1EUfu(+qs;t#4g0feJ}K z^;vpCKN>L*_RlOoJo4)|53V5#+=6r)`u`hdf5p%gEuD1Pf%eNZ+wqF1@Q#Y{!h;92 zN~BsHzX|PF@g)6UIKnWzcx!9~`q*qAfiUGq#<^{l;YAFY-W94!&Zko;B z<>s!lIDGQku;D&36Rrrrk_G}EFS}0RituktY@VEPWf+Oen3xOm)6YJ*oKdLLUeU&T ze?QkA9rkzMPOsA)$J4%#4}WTn{23VcA3JA%bMSBPar@h6gCjSe3M)0>T!<2Eo+ir+yrWlU<7&`qe6B2{%xf+hC}eRN zBz=EGV71c5U{je;4~l z@BQhU(9+lTdta&gqf7ps_fYkXk)4|g!QmWGOV&y%&I9A)fwRK9hPHC_RZ>IHeF!%2 zMKD}Mh{D{quSl>K`=RGKhd#J&s-8^;gO?!zNK-H8TqPUEK9#cKj$7q+P9h97EC)@% zgANN%V~_j?oYZ^Zt$F02kGqwIf6NZ8O7h=zi~G8tnElK@>(+kyhV4J=`RO73gPm~y z`JW!`PmliiiL%5S zH&D5u_sSbooY416URD7>D@Wb=)t(9peRD=OR{2+KZ zCJ?wtw$i(FhIC$fKwX3we^hj5t}3Zk^V=lhb=EqSRXKE%QrGuMp_H< zV{@`E_~UoKM%w%D+qce5fH3TJ?jn^)*y~&c@BPo;dVSA%H9qO9{PUd8oQiPy%Q=5~ zhX3e|d9UTa{lh;PS#>+yfA>$0{nNYewY_KZyAK6|$6egyW8lcCe+n@I#i-fk?I~lj zWQ*Y`f}%`0_o-KN%N6t6fI}?>t|b-W%@@ay<%F6^iZ*CHDH4|d;t-um3LDFjTo1mq z;Xx`NgvIKfWyuO_7pIOvbj-R1EJ5x&sPAF=@L=R|jt4M=lA5<+$){y({h#=P{_d*^ zInhAK>~;+RJA$kLf9jhgKk_mfVBYsN_>=taJ&0YO?VSCm*ADovr1Wbyk>7sy zFn{Zq?=uzu>S>F=xD??ki~p)UkNURvr)26Iw_k4mfAGVJx$?)MHny3U95ipk)@44{o+ysx5+rn35k#wJuhw6$|IO5|zRNfMeR4{7T zY!c?5uw0yC=5L(SLPZ$=ZYpKl^gB#h>CV92f zV=^0NOKFcEvK~y1a`(PC+ba`8LtgvAM>$S#<`L~ivci8C$B%XGFUtFHo~c3?RMRS{ 
zKEZmosXLmQ2#myraC5s^BIvfVW?iUUzJyVm0t^t5^HB6=%xZc!*{f4XvrF+6D7;|p z!Jg(=$JEVq+5}!ARV2n7Y?1IbEydpq62Q-G-co zIE89H!gF0*eDMyn`(a7&QPu`NLD9Kz!qo-WJF8yW#MEoUI7s>Xa>>EW?mg@MReIW1 z&_#co`6!4WN|JK0#Od_DqfU?iI@5g3SFC%-?qf`DmDLYr8vcXi+qTHZk@&Q(bfGxa zw|(n+;V9LMO-LnA6lleX+E+T$VgWzPHKLPOUuW!)Cqu?D?KksmV95^>%p;UyATtWi zwk}9>lL30@DuuT z)hRr-&E-|Rl))LL8zsI3FwOgTqH58VLvb>{s+m{~<|hfu`t^?{h&(kqSKhWCG3 zDqev;Mt+Vw5x83K<{(bNM+Rm3PQ%4I4Lp~{2R8Qs#;cu%C0tk*w0-b4fGqQzKf)lo z^>HlK>fU#HHee=}t`=n)JM1Lt+CvzU*T@73D)c2esx}^BtSnFuxfhBW09f?vw!&_c zj9Y5r1jf~=JHx%VSc?S5J(9DgK&gL6a+!7C4g-Y%6QZ`{9SCI-9x*)(Ie5+;tMDsv z--zm7BuN=!|MbN)n1{DQl!-)fO(I7$tqL!2E7IdND2elIq*??<4J)EWF1Ar4M(mSP z&)#Mmkq0pbMZ;||34CwG1p81lf`ndV8z6N6oWey3E z(PJYZOVC~CS)#6upWnmT_;7z>#fqs{ccM}ITy6KQmUH=)qow_Q*Qc)K8=mnd$+>zZ zp+|cp@sga@KjJG;Qn>QJB9{RMQ$&qfTC%Q+wKoRGV{OL4f0Q$yK|QU_3I4d20&m@A z+Q%oQeLmXAx3N#pbTU-$RxEd;4$1l>x8;xf^fvP@R&HDb1k)g;P?9DIOB{|d`%b)nz9D}JeI^pD46eC@;36(Dw5lM_7uq(43N zD4jvuLtJUD-5{nXX90yzQ(0~*u9hdI1(2S6?3milLlStNCNqC&;Is_0IZGACH6vBZ z{Jwpi3OKhEaw5u$QBVWrjR!MDt9t;eg3A>ylcziG&T4hbxSLn*pVOwv=a>%3x0Yny zNf~5-=NjDsi%*!{yZ)I*XZka-wrgoU=GPVU4y8rO&p?kW7l{uE~)nXIaAE4$T%hfixpxfQ9B1e|5s z;Z}RKY3F8jEUa}*9=1?)NNisz4D&<7$4J(8yXVwUi38Iv2fh4(Mui;`&F6A6sPl_5 zCl(BJkFT2DCg1Mm0vI7e9lN1pcAA9<1b#_? zj$}ucp#=bFB?fm_ZZaEeXrM|mlvDbJ{37ZH-{pUPQZp`Ng!2n4(=qp%dKw(*(+;^-yHWpD= z-zg?YZ!`xeg|ysk%BHnOQZP-)Ww_3<{TJZhw}L9zof$IBuA%n#0LKrQewrQzw$MyM z0J(pQ!fj%A?t+){T zgF>yopEx>~ZJG9HHlA@|0?UsERvtk=ehjHy-zdpW_)b_oL-ym zS98u3jMO(XmrwW^;5w798fbcCNgM?n8>}_GsSjbw=3M$Kdh+oB71Hpf$|ZTk(;$Ch zXQAA45g;7Eny22Idm>-^Q-2uuvxugJtBY6{svbe!JQW9u?H=%RuaT%{HTkk;x{ea4 z6$hpTJK+RY2f-Vdi{Q@K@&Y}1GZGRr+(#~_Y4-okPTnImdEdlFr%9`NmF6zGUKLF-`9jF z3A$?s$MT+31a?O`9vb3(qE|{V-Vkb8e9$~%!Qxf}f=)H?pJw~io#pB0ip&gA-M(i5 zx1$cHDoo@IXh{PHZRs;}={9zXWv@QTJN}VJ4;*hGB{H?C#Yqes_w#>VJG!?dZ}mm= zF_J*VR0(uXcpMAD4+2#ji&>*87ogvw2~}M<+8KmohFP)G0pNALrN&6}7~n&A2CPGK z)Yy~3yMG^NcgL>@B>-vOLq;Kt|3O0dXt0vAeltcyQRi8G=ygz~{MM=^sGQ?HGpc_= z$?NQE@n8$Yx$%F+tDkhIWY*?CpTG zUs|!t(XiZrZQLB4vupKWcip)}GZ9#Oh$<_iX&jbIx0!{u4u5}#y`Pg%R}~aTT;D<9 zOcbp@B6ksrQ@Ub)n}J)WpAs3I3ia39?j5j#8xCC` zEu6xCq{XaQHc>0I=#oHXXY8!l6`=3 z2Y2G=3d7N1A!L90&?`u-WJ|36BBA(;9|tJ{F#&&yn)kXO$D=`876FQ%>>?iKDp0qk zhTQb@p~Z=U?5TN8U!v78d=&B0s@O}hLZ!U2&T;dnQRS3lrWUkqp9_2ppt5n)Dn6;c z5`jN8$W++8cXrAVQL4hfks5f}V1zVzJ826%+c1H^-?x9B-*C7z(nGN-mOkTHot2E@ z4Bj^MBx!Mbl5^C2Y6Z-W`vf^JUWyjJrCGueAu%&hj;NiCZ0;Sl1-XK7D~*ReZrZ2E zczZeUnNdtNwkf|eM3I<(3-Nmtd%Gw3jJfOjvrYDMvo#FYd%;Zy#pXWg64{s}wVZHgR1sb>z0_=Td~ zETQbW?)W=B+deQP;xm{|SaS9ykY+e@2%k}HQKXDK*2#AX8Y*Qksu0uvs-tpMyqJCs zuK}YE{$5|zAs4*J{xh?7Xcd83KHY$vJ!JPiOmb#?}qdF3!oL z;Ug#o=lsLom;BmiAl5?{!pJis&yeU#$fVLcEv?}Quui6>@8{4L09kp6&KK|am#`sd z5P^Spo{7<8oAy#ocwb*9$uFXtZM%8TFxhS&@GTLp%S3SEMW1MYD)YWwc z4AS(Aiw`WiL>m*;v);DpK-w|_jF-dt?{$3b?z?;M&K{m+QYWa!ii zSNDS&lRoJ;)Y$dO_>2#`Eqx;vtb8%T;~sy2&S2?Kf8-~B8Xz#I6Mq*&cds9%fy}G4=%_3o{PqzzafjE;uhdQJ5PNjyv7*DxVY5yk=mepG z@rCsXTW-E|j`DT=T2>&idROY=ZLY+$N-GH;=q)P^-V3rg3W{*H7&9#qDC~dCU zM@R2(7Q22y32FWKsxs0e^~QTr>rpDDS$53j;rUcSyBG8*=5|Rwm{B)MW-g4VF4?d< za`4NM4@#r}K}Ieo`h}u3Zwr5gpZ_Rw`V@KT#r2YxEceEvpRJsm2D;*^b3g7~R26tnYk6^AT{&d=iX7F`GuH z--|+^#Q;G1XIYJCriMz<)ZQq~UxLyyx+(3KavSjz*FgG&I!EQ`a^io7ROaXV<;m6N zKqk0nFz-*=fc(>?0aI#pV^6As66MBxW1HPATF(nB#vTO*?p|#O|?@w1(a= za1slBIHC3vQy2>K&|yX2#YXWHPnOjhao1H_GYBcPaSj^OBqVoo9_s;+`I#l!Axl%= zslp5Be$_ATr{s`_eHS|HXHgYh+K;k-9A~UpJAR^z_br~9*DN`Xih|F4e|fL0WsXE( zUxZf>0M19D2DpFltSLKahf)y0?{h3L9yW{gj~~Qd;~TQ%Qsfl+?b9oMhedTR?8`%c z8&E{6?{borke@QLt#{DrnfV+Y#PizE1uWtsBF z4;)~54dO#L1pZ00DdYlaGF2O12tO z&MkOEO?SU{51aOE1LlAgQrpls({)6A3L1V;O+4D!snm70Gsn$Qm%>En%>CbWYIfE95-#KKcdN=$bafKGziCz^0p 
z5fzD&M+=;uoNb9zdt(AsCg@|q&oHMnzCQkFq>TJLIYgc*+J#~b{&r|pQ^AJ;xoZth zMGwu~>588jH5oIO3sCL-t!1nc0n-{4KQUozJpniKy;E=43@!4G zh`^59$U7}dc&%Ckmsu4l2vt#*;-$sSg>A)i9^zoQr`8RBNxb`yelVki_Z!0p`;=&3 z+pNWbakZn3EotAY=&&l4=c9_s`21&T_VdG3e?F%*956L9M*fmyEr$>$EZl#H7S%_@ zT2hf7>s*MzTn_zIl-5Z0^HVL%Q?U7cu@sCK_gw_?AO`uT0w+tn{&-g2-vrUyd!qX1 zhTJ}_F-&$tE>HvE65O`!`_y}k@6yf@@gKFF&IF=IE8GBBJHk~=HE&zoo%Ih-?@Xzp zYOlXlH`pYkj}UKdRrB~QNFsj?(hBc$;;VtvKLf`rpik_>Yp}fYd-<1g$@3C~m)v(y zIrbHvMqpZ>b`eu6*&lBmB-ldU66q{^5$q|H-GDSX19k-1B&fw8A<@b2CumMww(z-6 zPeSb+ruq5KZ20zU*?~I9c_`icXODsF8t!a(h4w9$|3C=GfF_hZa>;-99TTXW(uoE~ z>DhL}9W60xVJKlP6Fw|KZ%9y^RteO=Nx~X4wu(ZwGo(8>bB2jSzVPZ_%`_a_$ z`EUk)dOkoP7v5SuQdobB16et=K6fw@>Pxe+Tp?CUW=@OKr9%PxWp9(_K50p+gnrAN z?NUDY!q&+sg;F?Lzo~dUt;kjll2#mQ6vTUVJSDbA5+{Lk8k``c!I*=o+BSIV7-J_E z5=Ej(_6NK`2GS-|>TM(*K}14D(;2Yrq_Ln?D6S?zxG~?GKj44SkMCq5wK)eDmVI(G z?mXK=T^w8NLBUsA8hJNk-5Y>TFvn0)GgMFlCNtJwpIC=Whm`l|w+-+OaG!uz6)PoW z`3*xEU5$z`2JI^|oT&o>=EcFNKl`3~K;EFYp33z^{uOP7ta;gh@P^#NH@IM>i5 z2SQ37TeDQVN2A-Ou$K8jV|tl+ogH(7aiMOEd28Af)HiJRywKR%Mc7^%E&EqM)3h>6 zk}c7SfBBlbv>iCjMp>3qKum(d=l0%+O*x7&7go@%T@##8rV6;y5!A?i?Id14>^bRI zPX@_deu009L5QBjaR%!+1U_IJf!NCYfqw)U)Ye4gc4rdJk-+FIeU zTuZm4@!whcFRA1k=KkoEK%Hlc(}#aZWob?0J*0ng=tj_zmN~~)OG`hhPNE)=l$fT2 zQpixKt$qhDIK`kZ!wKndkCm~3xvPqPFpd$f*^;^dh~SA!UrYOBJ+e#9nw>&pRMce( z^;U_wM70MXOw(x z1_^^4$Cg0Pnv6WZ-<-zf!o>1XnERaE>J+wBsk}TMDPQ)A_?xnCoGwkE9Esm zn4y7E`p%^%j*TEEA3kt6nu`~9gV;l3( z&-3uG!`$r5D*P*e-ZDf<5xl=x=I?)JH!$=7Ue);vf8F5oQ^f>)KXxnfZ}9+c&jud` zjU@JI>i0oWwgmH9M&()KN7e3>hryuO_8(D;$CeWm@WA3FvGW1H6}A6}Q(4@nrmVwO z+%&WSyEbPt0k9p8mh3!$P4-@%V2(iRFIWs0lLy`t(i-@!9d>J-nc@&>yO4imNxf0F zBJJw>_IrHjApmwiWE%PLrrJ0kkn5u}B&!vrd{tkFVOwyDkRY9z=}P7!8};0VQabhp zs+l_izoO{uaGnqwbMVF->s8{N540y~h>5_6diQO$wjI%X8G$qgI6o`x_-kUFauFm2 z>^Zp%eOB`dpP5!l*T8zi|oSOFF8H!T}O0=6B|X0ml4b&Vfa zcBE*KlKnCt~MoF7;EG7&m;iCkYGs`y35Rw|% z%TV8S`$Z~2W2je5d((GBQI-9Y_xmUt>rCbqkcaT%XsV~{%^k1y1|@$Q-|RxJGXk+j zKy2^#4P}AQ6F$lScF`C-B~+}QI?o|q@Pig34L7^tF0TM>9Ul~nPX~{&6`pH-mNiNw zrJHOD5;>_z>MMtRHfZ4!^E}~vD0T8_T|UjM=^=Trfcs3|IuvmMA6v6{ZDJaq2CNI6$_Z@3@8(#cl5sn>r6U&0>CJSPZxKnbC9v|`6g zTERi=h8j9g_em2X8-MtfK0}Wd(48 z%bk3h^1qshQ}iBx@AP9uWT8+-k|8pYOdo@PGqHbBP$>7`BtaOV2B%T=^KbW)aTBS6 z?5rBylL^a-eeQUYe>@mu32Q(R>n?nX#jWOHL!HSx5C~w;Rc@^D;X!Ygy4>dSSvsMu zOmY$+CFn_?37vTyeh)B=nsF_8p!`;xVH-qS0?WG8n)}RPPByKq>l?kGj8;NLu$jHG z@v(o?HAj({mF}eK$I8%kpb^7zSk5NRUDJqCF|krMx{(Lmdj-3@n;y%EU^sF{z1CV{ zA6>&t^AOI;51b*#sfXw{Zy0q3RT|Q|u}7Uh4!1(Rf_8-9y34GWp+>x%v-Hp<gl{6}XTK^)l^IiCnPr{G`OLpx3LUQL2CW zI^UhVg3XjwVHv*0n0omY)~*P~VWNoeu@z=;6e}?EJlwp{u6!bmx?VYogDP47x3q9X z^)5HksnceNu3YhRNMQzzd^YkD5uziRlnhyUA}RZv>5K%rQimY0-cR7{8h7xt??zv9 z_`}n=8|c$OSR87hlfE&yD=ET?788FoMP_F6DcgI0Y^Z!vIPCu#HnWcS^zGNPlhsb0{}7cM|4uW1xQVd6>h z4;T20*8{@&{+4AyaU77y!^5aPPuR1avah)rP@Z0%aKDi0WEYX1SM4D#HP(NIb>$xO z;>y*koU5||{ch|R_z&B^-hk)7_o)^#_c%vnh_m0EYb$W94qZmYN`NJTl4z`mSI472 ztW@Y!6aDZW)yr2kf^_g;KRqDpL_mQ9_U|N{fmoi<=?0#!j&$4ocuwkD3N`DtX^IJsNWmwvTan7(U&l!*yzgb~(~ErxyGt%R+xK4KCKj>jEH) zJbtQ1DufA?P+El8bL$YV5bi};`-k`Uw#dLhEEu`E`}o34Z!(dbKVN{+V8e!S2a0Ki zm}rxC1VA?O`bXf@^ket&JqZ(^T0~reL|*f?X*d&uRUd+$yfk!?ZdJ~|ULBC=zK7J0 z>Lk03!you1>E$+c!}5RZ)H969^DnhNOpEr8po*ci9CCo?<aYNM}Ep7Iv6(Ns*>rjvn`a@>?@p-Zq+2UvH5?|I7a1?4nRf~!25?F z$rhzXFFc(&7D5hf-yOkgKLtoWb`0|etwU(d-lfGmE+06lslOhe6lZzjCtrKj1LT8ZAZF2paTr=;O!OCOPf2*5Ye$6MM^MJIug|wHtNxb<;Iig5;j+vI3=%wsmt@~$oRT>+l*-Pm&~krdg?j5NcFqK6aaMTaBRK4Z z7Ui@ZN6ZVD1)q>A&6k^#XTx}Qnlv96b;LeRYE3m*kbelOuZ+N(;X*K-R z9dYXF&2J8LIIe?y~^cpwh^2xx!txyRzZDn zC?dFQp~azG7z0A(tedUmH3Qw~GX6JZZ_=%*(sT>|AR8i}V&gV4A%Ub$N}(sHNu{8u z6u*D|8{k^|?DIa)d&m76XN+~MiHw*r=ZxIsQ1(Lm=*CmP<=TOcB)!(xS`qBe>e4$_ 
z>O?fIJG8HPUu>vRv-K^S$aXwm>9!VjZ+rKAgEZJas?8;}r)~Tyf~RbsJ1sF+yZ5Ml zKJROooTi4}*PrdHzC2G}o%m{RezLc6?k0cW53iN1vwIkyJw}V+u4zBT&^`@INZ>(+ zxxE+moPN~L&r2}q^7u}64G5&`R&I~tasB=@`A$oQ6DHyD^JF4<+ydvq-}@{Q&7^rD z$(^>^?%1`5x3l*&L9IQ~4UBFnFDQmu&|yFcp_41Cj!@#by6md>VHqCTZx5mRGEoiC|Q*oW?W|0(@x`@FlyWZk0TbGoKJ_^% zo{Hq8rFWs*IcH(awwKkjYO8-@{duMu&DoW0L2c80pkCCM>}DRdxB0qR1LqH`_viNB zvZ>-`u5j&HF5RR&eg|1yy(cvtKjGt3-;bVW@(vr&a#Y!4-q)dIS*CqJ*=F~;pC;1? zP3Dk@7T)dNT{nvx8Ky>UTd+F-h_j0?;pl!O)z$>k(Bd-KYjEQaML6i&9bsB8I1huH9aJmBF=yM)<7QO-a2Q%R zPREsf3ktd|7)pN(hkZ5L))3cxa>~Xy}ClyNf7UkgW4CM-x%eg!cSM>vW`;ALB#J+mPq!&&ffi5E`^HXUGuC97QfZ1 zOD?Zshy$Mo%x-fu?DG>mV~BTo?6QqfykXuZM}?1caOr={aM){z3)aISRwP*GgWO0d zUqABvM)|Ux80DJDr$=e7Z;Vv5Qh9#_LBJU~y{%#@*<_y!;$}N-#0HFmyr&n)6Z;g= zVLo6W9Ay2u)6X5L1E21-19iBl?%Q_}ZM*n+#Jf6yq0y^L@C8cN7HL z^(sG^+GT%)0^gvzH{A*fe4}^wbXRlN?qMbGKH4{8c5Jfe*$*P)ogS_1BxGu6)|oR_ zN!!JHqe}(FdW7b^!8c(e)m!vf*=;WfO&xA756NGpauxXd#~3{LOmLz|<3hdMeBi1X zB}rT54Ig7WGT45-HjdEq!s{hGD50WWCuY}8t$%-2#HU@e&wWMw5Bt96Xr=7;S2JCO z9WPIT!fv*p&|hv9G-`N$_9w!5_9JojP*5gT3hN`-)*t7h*N{D*!?yKxd$)3%*j$Ah zQCfMA>0|7bhfS~2ZMf!Ji?D9WywtP{LbjJ>W}qNAy1Xs~NV-=^^L|Zn-74>*=mIjb zS|fk0U*bi`twVsjTOZa2FKo;EwoJV9mJ6!!66poEe;tgL-F6yy^v?U5#V_XUu7+t8 z&SJW+_;<@NgSnrwjC#vHMB>^v zK3k73tJ50sP3;=mDZ1`Qwm2Ta0FG`jpk%)aWfv5mHie?89IqoY>@hWZ_x={-i7CC! zcF!b-pzsCxaKiWJhhrt{@^lW0{WfFn)7Il5UZ)gCr|}TWyZG3>k1Zg2$*m6j@qK^c zwIN|hT(Y}$SU$EaCtr#u^fu%&-((O5^HFX0=-xk9#wpoXB6BZC@8x&fF!H=3FMPQC z(+DTJb_9LjgyAHvrd$xUmc};L{`_` zxSg!qbH`F~54!`}#O)b%hooZirg|A5b#pkOX*)D8v)5K^?aR{#4fk`_9kYM!N70){ zG|7(i3HbJv>%5>PQ7VUPClH5SWO^0f7E1O!#rN$AaiduzrLO4AYH*9qt8n|Ld;ymz zg-4c22H~*ieuv!j(9n*E?3(hpGX{z zkun~IRjxx<_i~@lu$wMXoo#cU0EY4|$7+FuH7VmS=wt5U7|R&SySd zUHm5^@$-`RnBFA~#7)MT9F)wmbNg^uxF zyQ!g0xzK1E@gy3`B?(P$ghLLzOkf|Jxze{CXtK*Ccu!?J3G0dBsZ|?VyMG=av){QE zsO+@-Xwva^;MG`8Hn)HG?vvjh1N+|O=%y7`{5c1_6BupdYCEHE;s>?+?fyAguGGxN zL$pds-{#rvp!QhXm{#`=*9BmB^?V!$+Pw9Q9Jy!;@^UBGX?n(_PfB! 
z^vCXUgjwjpnz3*Ov0V4W6}5fwdFySM?rD#8rZ_aD`J7SmPz`@4mAW8yAMt^RCx@=v zjljV7)u!1b+s%4s$Idok4}3q_B=!2;8lE2V^U z;!7uS(jiuPrf7dH`|7E;mt=CU;>tV5ZepL8$X+R)KkY6Z?~>DF+&Zq>CN{&CCvTE= zj8pV}bp9OrYgX<|CKnP*wff{(f9RG^3X$n{YZE!%i!;-;lzXr2tl7saFWe8?$zU{d z1^1wV!=;n-sq0^NC-{IXx86)c#*NWsrEcrHFkIiax;uX|NBc?wiFuQQvX}RJ_PNf3 zgii7oza@m05{oJ{oUdQkH{3>jFlKWSBK%c&4H%xKr_vy)U?`2El-PFjy<|=v$ z!d1*n{g!`ud8zWatRBJ(o$^}j0*B4+c;`ruQSZZv;%(K)=eLH8I~Pq8yS7F~jALkQ!00hwE;X6jkks#M^@4$sMd)|L<$5YPMB0EK=hErZV_u8I!pNZ#m zm;0>iSSqW>cvsAqGrpe9`IbNEt9{YHSGKKotYzjg& zg{e3%IB02eQ^cg#p26J^&oAv@Hfn!SY;-4XBk|VgSN`FS4^#h;XX1TgL+(4K*xZZ@ zddK44HiqtV8dv)zP|Mx>5CFhC_2_VwrfCC!AnpF^{Q#vkdlhaFcH%^S?$1Ymo>I$` zfqAKfIYG$3?jMI2yH8#$pFd1C4#QGSBO`pd#_@H(Z6>LzZoBP9bs7_-BHw=ulhGOl z>#wTf&`PP-n62=({IpiP@$R##^x26dLZ*6U*83|%_JLV$E>@fF&&@Vv62(0|uGb(D&6y1 zjE9SAteUNB#iK2v)4+?c5D?qo)S3lue)Nh2jzKu zsWpc-=l0E*SFh5ptLH5OE08{(&()`WRyDJ^PFcZUUC;hJUOdZwUJZZydTo?DO$B$j zSN5uI-GlpSot@LJZlA`##AG@pdp%6^rdPEQ>~9>u?gJn(G>ITR-Ha0vB*2(Xt|UUQNO&|o0AUj= z7H^owuoYLP+8Vc;Dbaskm-;c;q@S;a$t71O#o_$tVNntueX2)ARu=f137U9(QEMwHzWAP>j_a{qJ+bE%J~oE|T) zmnr4!;~ic?SwDeWRz9uxqB94^81{tauE+hTvz2x^uMXqW=~X5P?3!D#)TlIVze--A ziOsQ>?tnph4$OaOweJIvay!gi7%sA&hX=oQe66?pMj-q3JAN?Pdc{QRlX0wQR3|#X zL3GDu4>1MA#UU)bLEz|hBX+{YDwj;F#3M*ZW2Rt1vxe~_^%UWtHefm&iAykAox)_IdoPNy9 z^TMtQb!C4SgUhdrH>RT1x)jG}C^HD8L@PNWLB7);SQ8yjGq1;|F--e5ejbFgw3A5E#@kZ<(7bd- zRvdF{)G#{JP$`{zs@U*$#&>sD@s~d6x2H;5(fnd4W@9mRv@J6Ph`-@^+#FK^Gbsv>df0X4X1$J(pOv?!H9J9DZGE374O_asg9@TqasE2+Fi=6<3Vyz+qf&M zDz)>gw4ai$&AwBKFW$;c=~%kI1o#exA|0ly9(fcsMR#rFyyqkd*LO)@T*Yba zI(h8p=BmC6x2rhZMYuNdMz7BbxavW>H+%9ro!;Bl^M>9x_m8#cmhrlA%54M?%ePuL z-dab9jkVXaKI3!E%dZkct%S^Jw^~u9p}<~$B-uXtrN%$63Ey6{ZM3dO?b#eSr^rs$ zD;&FUAAD%JVmF1G3DwjHZG0TSl`+QZnv>n_?(}W+%pz6m_hoyuRL|h-OY5GrDDPt? z2#S?)S{HBemYUR9Qzr*x|2W&3-Goh-v?gc}}5px|Vqjk{tZ}Izo z&YeW}9%uFLdJ;fjt=h-%T5Sa*0REJ*%Htxv`Xj0Mhp0rS*v}+Gsiw{Th;VV)8wZcX z=T{K~=T}x|j3#Tlt#~~v=MvL<<#+-2$S$6F>pef$+ByesufW@4bPW&c#v!JwWIJyY z-4#=Htaj;VuSuNk#&*B4P0^Fz+3t>i^O`Si+hnJi`jz$1_o}rfxlZ^OOw6H-3?~&t z?}aAspt;OWM$b<6?%e6^e>Uu>)jYF5Z{D7}-)>wx+{wE=`RrgeB;!*EpI)U)(Ro73 z;dwRhnW%10qFZXu&@L~}b~WzXJZ*Q=J5S}K8x^v+PD0Vo0^`*Tn=GE(2Y8=WMih*DGM?hk0h=ELlOk#T7_oDSnAyI$p& z!V?J_-GkZjuO?y|ZTM<6-JT9^|tyxn$k)G-(_(&l!52k_k)`%`Px zNx??ip^e7qrur>>bKcGIpT5&_Ey0K3dc>l{vcYK+U+nN548y=E`{QFL`nq=*Gjdzo zPfg+N^CeNXdVMUV(e@3p-52OyuiNZxQ03nCU}E`A8V}K#KVxD2X)c+=IF@?%P5b@; z>U0n#nPt=J9UaV%AxOu6=SnKMH6z_i9$GsA1cw#o_4=CBxAEQTpJ^}b&(d}KWXvRg zJhNNa9wx8C6jm{ky(%S>@oqI{E2{cXow;^K(*mosAiw6d*T} zva$>iaVgei8_GAqKd!<}b3L;g#}NxIIUODOkxS?C4-D=AoFa?!GnZ(DocUB)CWXWc|Ua#tv1z#WzTKg*5RE-eSei zp@EOUSJ0GE3f?YcQExh6O0m+@UFPrH+4IZ zJH^)Se32#Ib!;W{-{SS6mu&uShsh9ccP~3|E>p=X=6Yc7>yxyq*PN>H_q&Y9dB00< znT#rLv+h5Cn-B%#L5;Xp|B#PUdNi!pFWC|C9z%TkDIk}HpesG;iHq> zi?Ki3TQw=9)Mio+zO|!p$G248_s?#XegeObcLMJ{MRHMRk6}l!*VB+2d2%w zw`9q`B33Y<>$N4J*klp7P*LQq>g#{h&-gCm%sq1%%URrZ-80_WDL32A0R$+IUD-8{ zNb|gZx>dqzdm*8$ML`();YJFWSL$xg4kt)tv`Vxwr>uV&I@_hu^x8uvyC=i>XmOe2 zkB86Q8-SolUqdEh(==i?0;gQkoAyYeO@4~bpO}4Bds}L@?|rd*A7A|Ce&4U0=hp8} zFc7`SQ^GM1^?Gy5H>+0|^ON615K7F+7w*b`MLr!~!=su$%r?J%gyY+{J_cPkm|v^y zP2L$jqoC9Clra6_sdwlyAo)R$JopfJ$OZDcDsjEH%<}qFz>#@iYMGXN{g`5b&qZ;% z6wzaP_Jqq4#!MUO&^ZQzvVPoGUL>ZmqqORM5Bu}!K%n2RsQ5U0tmcKs`&lG{7(Ud0 z`ym=ZWx&wJbI@al6sB2DdBw?rMye$%{*$y);LktLy3Ozrgic08;;d z{?EV9boh@W9bT_LCpvr`8(=R6SP!YcF+XM%Y2GdXKv|A00I{;1(6F4y001o6`9X6i z&v!zhOfUN$fQ(%ZKX@c}cuynzULSjZ02FVw+idCgM25v9f;)HlGry;5eaoB8xSUvE z_W;NT_N0V)qL@eWq5vq%i44HHEhjK6Coat2A1)^*xJ`O=`2)&qy$2vpmJ<{BR5_Hf zPfBHj08qr`;<(##;=*#m!hGsPvb-O@<7iVKcd{T;0J6jUpgU&DVLDqkhS_(20I2!= 
zez-o)CoI6_LVrIi+HnKN13tK6mKjNt9U#|5a2i|sSZIfZ|YJ|0Adjk1F^Cl9z)ZBfW`zO z40?3*@A=##jVCexix6%WoXwPzIR|L1u$F zo;jJU$((=V_xV0nWsuFk|87db&GHFn{a%9@mzd7?AfG=YF0}Ie^lEpIC=wwgzBa@s zn42b``QxOT8tf^8lrbSOQ$SCfViX{t7(Nhitl$7GmlvU%B|2=NUp5EFXB3pr*T;W& za^IPtuM|MY3Q%mQIZyz-h=8K`F$i+t0BQ|--@l9ra4N;3IPkvWHgk17)+2@EoL@2l z@M&2VL8Croqnrl;q?szi_$Zg=_`R;|rj2ol$9c`N(JHg~an|z5e7(`~o&x{GBIc~p z9Dm}dz&<<5dA#I+>=G5yScQdIXv=?RC5_^t6QV&X7R^U>=?1~(4U6gqX^aRl6*1@Q z$Ztkvz6FyABrymkL=L_s@vh2b`n=BI9!G#;>&OH|Rs%sUkctNyViG_wn6eCnB9W@X z1XP5ya{|FKA%dyr08nvprPUQ@iXcs-erqs+qPk727X4A_vF1wMTP^s#GXx3g zD+TJ=IRG6CD9#ib%b@SAsL#VN4=@U{eJJ%CL+Yq0XJ_mRGfqEdoF0Lm0EqyX0{rp? zR)q^r`&K#Q^jsgybd7;2&C&;2Wky60aySi-5_W)&RQeBX%k_sY3>nozAx7`eQROqUn=~r z|4+`uUwrlB0{_Gh_K6{ZK*AIuNCH^`S*>mJ9R!x@5pxo$DT+n*5Gg_-WDZ7;nxLh! z2s#9T!2+70fna|GNQNXzBnWVRykR-unaUFh(#A|yw3Bbj_y8z^peJjYnDYTq5)eP@ zU9V!$HgjLz0>l9Ed6w-GGz!4dvhLsh{F}$B{O=rC*7}QobKl7B3pR{c`RSEMYnrb) zV6-v5fw`7^vb;tsvOE6m@i383sFh>NLj_iX1o#FI*oA*CeEZDyB~jd9-_)d`vn+$W zeMqtEQ{9?-CvBl24ZhkBnpBur1i{gEfgDn`ccW0TN$lxkTxo=AN`VV)%uEDj5Xu@5 zVFHXI%OGi7$@gV*?5IMkmj<%ja8ZgVy_r|GbSH4fwEu{1EJGK!4vZZ;5S9t5zTihS z^9BZ>3W$G32(5xIK#8C7mnSSe;>%&?wGV!K#$77|<`0IJ@hF$(Pi%?u&6%aH{2$y= z{^B1$F-^F+CM41*qT4;l*xzCCG7;d{Mu{V7ic_r!phO!RR6{KGiAJPq>mcDW<)qZz zK9&HI5bPLj=st*cgE|C9i82a8Xiq_=xd5AXIUIi-kvUsE9|D4O8uE?F7P&r>z2#OH zkn)a_x6mCk(;k|KDjsg}h?6-!c%~??SeyV90qW-X=&nn^93OM$PfKrzZ%dwhxe564 z6El)s(_7+}XVGu2{m1Lato*GrdBWd5otvW8fBUuh>0B?jR2fhO5EB7PY_|Xk2n7+6 z%0qu7&QyS92L=)#rUXgn1w(yNN&ox$z@m-0FK`72d86Kx;716d+Yw{a-jg1SkOo8| zrUXVo0d*#c#MHobcUV6eN$fKu5n<+V3Uq-5dQ+6P@y-DOhC_u!8fX%w5CYUfuS)}s z!kJeuv-uAZA4^^GASOV6Z8p9;33o)zQSUhsV^FieMO^&U;DRmY2>#7ahWO|2AFM7j z=3A5IFJJYbPv-aE7cTq7rSEfP#)64_`4Y|kaH-`vmvO=k6n|~k%NjH!xkXwr2sGhLuX9 zKs*B^0fOh=FN0q207ar$#5Z#^a~&ZA^tEm=bnVd!0!lnUOacg!KwlwHxH2vQUa*kF z(i&jZ9La6{u75h41`rV4cB>f8+yb)k%rD3Fsu5SffB$%T`sZxN2h$_ zw)DckIpUWy{LLwkD#epo3Zl8>?c5_5UOMNtSTHrR{B7s>nGM1=fIV|F{1-)($dVg~ ziUv=3wC*J|Rc~ZbAa#QVVnHX^6bX_Pv2q~YigVsc1AmYgo^+A@<1n8qp-3VOlen)S za`QJK3=#pxQOt+pG0arY<+C7sC^?2=K}7`mlTHLxboM*g2NEPEk!RkxNKLZ@j}90; z)MnmD{Foww|7U*AdAH!)mp}jV>DU2tY^LD$qe+$=Ml@iwB3s@I3tZ0e1#^9WIXvf3 z4AmYZ2!E&|(#(V+o@-Zzz>wRK2L6UORk^My*^B9B&H)p_fuwlmQ+xv(v{gV(OSLd# zKo}V`{E>u6pAS_r3i?-mWs;Pe;*txlF2+9Yvv6o+f*~X`ZUPzfrUn77D%3`>pL2?$ zbw(u!5R@2H#Tvix%;f~dPcJ?Ie9b&FX6Vc_p?@X);g(;%K;uweYICXQ74*$frRH&Y zuK0=L(nHO1dF@h<@lRgGi&j}`-J7|||A=cDFnF%>AN<7bqV4AVEH$d{V9wO|n1>K# z67ZR$LTYHcKoP$@TZVlsLrNg%QI3fzDb!RT$pTaZ2A0CnmHC6}%TS|@BKM|pB7wAi zAAe&HAQAKgqhYCevFHglTw%diNbv&>4FowhVXul}*<(ZGwI!B)CJ6vT8($?9%U401V-86}0=b*DH-c+^))#25e*}WL{$)^51bcJFY#UKi zh2VIcweiC9;M~)V7kE%`nt2`@r$0PT`G1qcKRIaScw>p?O&iVI^7x4U>Dvn*nlP?F(|_WLAQbt~ zmJpfZ+3V9HA_5oZkmQ6b4} z6_N|=ll2w!BBZg50Hc8U#8t^y@2J6p4uNFh-7%Qg1A6%X50?J%GJbG+?vFq9^{u_% zdN3FKp83YiZFw1wU)}}#xjy5&1%H|qDa>PCK7B~sn78G)2NV` z^ZEpeY+V)aDHSdbZLdw?$XB8)$RZ#RQsPQvwxFmaE%h|_J04C;-wsNVB7X@C!tD=U zah3S!#eBglh`)Fx@)9oa4gdDEFPB(!@}hU=IL~qW2hUVlGAC=+NwdHA^WN|JnWJ3( z$NTLrlQ zK8yrGAWCC>Rph{z=HA%{GJhn21E&Db-h|yNZbi@@^sy>|%7GqHK+k(&1$_kgZ*9$b zZmx}y!SiP>b-H*lbDaux?ADAj0@u0EKoC1xgp$xDF6jR!f6^F0 zOt|nF8c5Rc?Ol+X4w@e)u?WfWHQyfytYQmLGJl^glS-cR6wIDOkAI*DumrE1KVvSS znS{t(g9roCpuhN&JaZ-xUld4U2zso70a=jZncN$oM-p0U{@WAhJW^(x<=Tjk9-=vq zNMX)-#Qj;vFCNYw6ZeNZ4S1uK!R-$o&b;Bf&XfGrjOCAZ6k(q&e$!tVIVoWme6QV9 zBpM&283#%Klt7{Y+J78m5R0=PG;hW7$}t<+yX{H+<_^wqB>NysW zG$I82%nKkW9v{PPpaN#?9m}w1ocRuJfL286$=sV%=i4p{h77RigYBtN|Tiw$4>^vA>ZfXj$8>4CIS zB|wTfw;>^aFaa%mlVld92y74pk$|MUBCto2ltV+2;y~)D3I=F61S^1IaimBSy_k6Y z%w70eo;}^3DB?)D^MG|Y<8MzSvD7v$B1gG5RTByd?0*9Tg;|bebJm%E_x^v?KkUt! 
z)4*@8R>2nsu)OXt#DLiIxJ~SF0%$OM>k=HO3Q6!@Ja(#FfE!0qQ4DbQ*D3m`ZBttG zEg+IYGAK*I<{0+!@>^*oBCv3*zqkPrwQoM0=ZRC^>(BY*bNIAGMA84QTYvMt{L%j3 ze(<@pMc~P+Ft2uwfxflIEVml#{Bqf?u5>pzxBj~?cA9~5`zu681 zl}IYjth%*U&_Mwif~2=#QN3sV4%WN~`b-8SP(a@(%7|=8Y#rPeZam|w2#M*Sg~Kk~ zM_%GYC^9bxSdFBbxsjO1M?V082uuyC+FGRXJbyDmi2f&z&OXY#bNtc6-HfC2P|qy7 z92Z|K{CaRpz5LS)i(P=-v%YWm*-aP99 zeLUBXsP#Df`u;zkLps}aUl_Smf;595cQGQKwDg;zNkoe05ttIs$3*oqR>W#!m(4~e zy_a&!4Hgv+Fg&AcN!}5TvC~QL4F+ z$T`IumGtoI>1bEq%|vhj67b^u+Mti#C6sD7rIVne{rIXQSH5Uh}LdA{q9=%t01E*8Hp=;7#Vq=n+T& zh(_>G=aFtX_t!-${0~3u4+mj?e6U|`q5kUYU*Ex^s)4*%kMGIR4(0ZAd#OG^UYz$d z?a|Zq85_9)amwt7#VxRX^CVBq{yb%WXV3vCweX#DwP!Le^6q^pHuZ$zkg>r~NWetm z$atFn-bPhkAwqBdQ=5D0kI!O!<~$)3rnAoyF!cRje3n18{*O;je&!6Zx^UzF$Nu-1 zC%Eu&iQ<{Zv%j$duJCt%{8P)HJ#X2JU;=_o3xzMpYF9Pq|E}93_GXU!t=qqUeVL@_ zTgX(d24g6ZK*S@4K}aweNs4ImmoGEtWD`JuM6nbmn3F`5s>y{Ig5#Ny{%^fW>TbBF z3mk~xWao6jYhBAzP=X1{Uy znUVd>oo7#Q{{J6t_T_8YueUFM&3OFd|8M@8ll^$vLp;qKPo6M0&rTNpH~R#!AW0*C zJ5^^BxLTo#@N;p|O}uGrdq?if14OsKY9+5hnrVP$t(#YgQ4mHGg}HrSrCvtbM0&Gs zd8tRN0zwszd{0l~L(HOoX7X|Q`}Z90U$5(5-12Ak>QM8~EJ_>A-#Gs-`ZD-WpWKVT zeelacMcF0OA1}uz|Mnz*|KS?qQ}69)6lw_1f&pTc^nonp)P`bAAcf9pV5h{OE9=|EYP1G1K2<1e0OpvivTLOg)zKX?gU`k-ZcbP#hl zu?8e-6*VmhT=Bq&(nM@XdrupC1C#<$KxZL9PhwdSGpFi&iTS*LKZTg$A6-Kz6lZU6 z0CRo*@W3vTmO67*tA75@*f8PQP;+7Chs^A&=fqHejAwqR%H%I!eO{Yy?fh%4agp7B zG|*CazkN)0|ICyBTH|=pNq^Qc&p2zT&0pahaT1%oTvm$Y&hf_BMeWJSg-)E$ z+JfL2`W1T_deQ2;tJ)I`R#chSLvdS305<&6#4-bpz%xZD=os_Xoej`If`B4{YYl0o z!1*6>fn}=;038rp0*jds8vVcc;IH=i&E4r6m+Z?6f9LUkKf3a-9+=NFTNy6+^)t8T zMQ!2L!Q5x&exO2%e{|HsK~#yx1xSSS>e&$YKR#sM3$i==2S^qR(-oQctTJY(4{?e? zrii_1!JLnzK32imoA;-bVAfYNpIkiV%9O?|!y=zRqL>ol512CIqIDJzc=l+1XK%4I z^F=V)hAVb|5G_7+byDj-5@)Snx3cb<-_NB;Vdp-w)Ib2?Kv>1iE@<{giK&1q=R=Uq zUh-Tk_Uwni<@;vcKFxmQYVN}knfF^Y!NS)D^4%vg%${)e+e;aT;&J9_Ki=4vOaJsz z_}w@5!#{X&_I&?^aliU__*rdPbUoWPU%ORXopwG*S#bI3z_q z=rBh|MDNx>XPj8(bM6;=;va7cDYO2ZRw$6@q7$d}s*wQEp8)mqd@Tjv`Pz@}U+UWi zGw%ME64DJ4fA3lJ~US<~qGwW&vI_wkl z=2ue?jfbH7;jlkin+D?SHLWQF&&T5%f<~76K!>EVf5pW=a@IRaPIDg8SvL%({PmBP zT;u+lm(R8Oi<>u426OCe@Y545ug=g(=KTP0HRHX~FZ5HYgo zf5+L+`r5e8q7V`t(oB@b*aihdEl)_*Vczpp&7iGFF!}#@d%NaFb)^mT2j#*faVE}X z>jMxVY=ObScQHu>#tBJCBJi(2)xvf>Yi8}e-*c+Y)KtYbGDcFX)gMnks&iw+_?=8G zmpriZ?XK?^j^K2Qxic9v$2|Kxag3RHe~gHX=Tq`!xbPJZLE*R;#O@_T%@GSG)yEqs zz9On4hrPw_3m%j%0Yf%Bbu0T>MSJdp)=Rthn8UmeIyVp*P+f*hWvBsuKE(#cSdU$Ye-_hv zzApbckF}eq)ZLdCurL3q2M@7=^6KEX9Q5pr63t%|!ULk^obM8}JQAWg6+^iuN^YZ-{)1lKzfrC*UAM&}M;o zUQog##Lnd>UBBNyc%9A1&(VJRM85+jxSm2|@!mw9a`@6$tEA!vtKzCqYa0xjNH0u7YRHv;H1{q7gvdR2c@$mQ5KiMT&3MTk4GIWKB?x{ z5m$;dNlA=)Rj>2+I!K^|@b<4|)aT|x(PVSWV0v`p0e;V^y_2nWp8EC{w zpY}l8JYq(5wQHR<*Fs3eEbMxpm zQ%`Os<)|A-HqO0S{8}gxSl?MoCXDbr0)@A_-6z-LS&~XmpdvXa6GxDWjit8w*ja^4B zobZb_hjWUn9dq!z!e%&vX0aeJCDzUt}B*SJN%Uw*2&JP;@^NT+P%y63ZJE zU<}RM-aNaN`w>=h=@)TGw6pcMniT!))lFoAR9D1wpI%e3f2i*TH-fSz3#l?L?aL|C zK@p_Rm*k&snA7y^y!(?oEh3zErAdzSzI4>a&b#5Kc}Ji14BxRo+Wg0Ux#{2fd58;b zGh8>$*}l!r*U2fiu`jM*HMR247*SHyA%=&8)|?x;5~QpaJ=!Ib!WO`Fx{W5~RRcCH$l> zVvI*J#-F48V@`D=iR$iCaJ!hO$3->8c%|zRvpC|&r4cWPXGzh2+|Mk&=!hHSC_CxV zo;u`Ne}3W@CtYI4<`kzcf9wN`-#qH$Q;s-y;ql5d;+`vW=L7$aaNn>wjp?f-g1Kqi zhJ)$@(sjZ#DI-omiHPID!8)x)wAJ^4YU+8KBhY0N(Roekwt{SaZ|xl-7S{h(fu_6J zPAJie#A(AiONc67{FmHfVrZYaaQ{9DgYrsHe_i=?^ByGJ?nCr@wudJ`c^H5f2OM=* z{)VF-aVEzY&lyJgu->ZU-{J=YG_JENNT*bse8`i0T*pnDdPMo4yFRFj`w;YFK#gRX z8~N$YDyo|yP=d0`#I7$GzkUqMK63+bQwlTfV*V#>pMBCKi*0jBY2v$;*l<%a2HmIU ze`SfUJGyeR$@~RA+$~V(OT}PvG1u8uGMIq%Z41Th!6zl=#dbxb^wKH9?U+Y3lSxKH z6J8-L7x|Do*ZRlVS=(ebg%^vm{sjP1$z-j z#$hrMLhYq5YH(RHlkSc5Xz%3K{#M%Rf1*^Ex5@sIf@!|E=7RR7XIf$`j5EeHH|=+j 
zK>3uZg}d)4!_&p5LV9LaK!eWkh)?3C3+UqZ5wRs0LvhV)|GecZrrSGdI?ihNO>fBAq* zs!uVpt9bb^4XP&fQeE&$M_h@*t+750?JVUntzB;`HvXU;qWEW|vV`fb^OSUbKn!^7 zOIq}x-3?Je;SNQ?tD^N}Q@8`av+GFfMP7?Ms>_(G;N~eQLQfTW4hHe)$vjuzLB&0c zo1#jqc{UD*JcBN<*6lP7F~+HSt84QkcI(vl<5-Tq ze+#NMagAuaUKF%yBSyMCSDa%F6VuxBw@?ei=OpWvu;hE_NgVY}5$vJ$5_vBm z3&ggW1Fq4XdIf$`wp0^(K=3Pu_20cWr~E$XF=&FW2&e{e_~$TN;cf^ zUjZzmE93aKu4^&5gy)ieYVu<1y1?SsmS^kW2S;Gn!kN8tmIM6BH59qa{B zi@?0XIJV>ch~2Ya#L&_oo9_W(gM@i%fDMOFF?OdVD7HL42qbQD$H zhU?kd1(7maN6@`{iT;XBf8nuIRSfZmeZxr7E*Nm%r+tAO_UU=mr4fO)NCj}(I?{rPhI8ADAfxun_=y0vKk2ky|8mqTnCXCQYqC^8-IVET*75TzBJjR zeEB0D<6hK-!_k%-f9pxncAGU_BW(fPZ_KsQ)Cx(x2KJJy%Cpzks!9vH2e$UPT_ZliMKNOV4wQt4tN^ zV%fG!-T-`0)|35|%#`;F^M?;uueu7>5=vXI`rAB?>+1~rag0}#wieab7Kc2XV&2Et z<_Ui~e{ul)CNIZzy|*^Qq4wuk->H7`HNE4pL_T=rGICY>DPi>AdB3aYRq+rfQx{Sk z7sLx=?pTd+4^xDkH)&GIL!47^mkJlIGYUzA1q{W}=J-y!4Kc+k7~qOmV#&7-r3!H& zfI##E=|Mse&)A=V-T_a^rfvNNB&v zl~jbPE`_dXEdJ{r<4IqzX<>o>VBgyM2J8P4aau=b_UgH>gEF@|$@tOE#j*XdUq)Z> z#mrq79;ng3>whbk&a0OcmSZAVg6Goge@pbv(p%~ktFom?rXj7I`l!bjm3TeLfi4)O zWieuc*&8E;m7xrd+~b04D>&E-~)DX)Kjm;((MsHi!nvcSnqr<$!sA|f3B0v zXQ~WH-MmG+pmISxc?o2`))>bwqO`nXrhSX_^qXTn($e!EdgBKVu;LZh6wvqOE`?d| zC7I?>2%B35=@Q~x&Z*-hkj;H6Q%?DUK(>mgOqp&1s>@ueA`^A~ve2{4A6h$39OENd zOG`cC=A&dBrym>i-)amqyPnRlfAbihK4NUo>O$4eTCy>N2j4TLE9)ktAUvD&-t%>k zwv99m6@w&lFUOi#B^Kd6Te?b0p`9g1Jf9JRw0TtNKMZOI%H^TvLDEA`v=pFQtrA%V z$yD$@lv`^9s0BlOR!$O9VS#`n(}cfK-H!SPo{}B%x*W%&E>Uh$9FIWRfBcs#>Jy&o zHQ1`7{(bo$e7)cCLdQDnsz8iq{)2VGlvYBT{bU(;Rd-hs=w%@JaR%hgbU6 zHPgsV2^@XGB-J_^vMjoqe_m3Uy@XU{4ASh0HiDVie9{S*4lNUX`O5;l{bFhBsH~st zFJaRm8Iji+4aa=(8Aj@dv*BdJlo`{a7S=BM%|;!$08TOL!#c%vroCscYBSzDlY56m zpqm#C#gdq2@9C575hT%rH^^71&%Qyhh+YysSR7%ztFDJSA5ai2@IGKRxL!RfLZ;qS| zC)=ew#?4kE{`tqx3w~-Z{;Qn*X3zJ>SY>%>>z1|_OG;R!Zb6F&UOoC@RSHOz3KGPg zi1(n9L#B&3%GBJgf1O5Ma!TrQMO5RueTFr*KVzxdAXv5@PaDH)!-HCV2!(b6I?oRI zFta&(DiYf3ft&uTL-vy!9{2_mS6`;I+2Sj~8 zZh0J1=Z+>AQ+BVORcY;Qf#a=PT&Fvm4=^P;*keEIsrsAWe{Eym=ld37Z_j+7v%J%9 zw!*1ikBYGt=rEU4`cwNi`r~t5aBsNtnJ%|;keWwcMcfHY7iXO)FEblPtHKxUu9Qb-p z^;b*vf7hwXa!H-f+m)^PL#%K)ox7g!IWNEC`c8VLe_gss*c@bqXY*EH^W$Re_&P1v-Ed6ZfeL1InrWJ6k-xSO#UIm@LaU;B7HFK=q zCHme4e=mR0#b)H;vidbcJ1)1luqhdNxU7t?&h*QHC*}u}O^~Om|3l7y#=kz`f~^id z@pE0rqx>Ia`= z!-#|U9dE);Ikx{^+g=}VE;yFI*SL&!^oU&|S)b^D#`hd%7H3FQ!Kf}qe0tk)N}cBe ze@=x=O!)vxqzn*0l0Aq0=Q=T16YWW&d(T7M!HBuVTFm)xHI%0sbX$*mtU-54#XsCV z5@}$(N%}CkUIOOS^s#0oL;nh+YYZXUKot0Ze0p8M!16Thh%PHZi^~{btgAfQ%awQy zw4g#G8J)~PZ7!P0ugqd7m0}c1k1OTEf5UnLOJ@e(zc@T=VRf9A^h+Nw#9GoxR>dRt z7Q36+x(oS_x(hY(l{k75_D%#K{~15{6FYz4e6aYIqu+AC0RQBC$}56c)V(_9x$GMG z@rm%>m=BX@H8My0M%GYvNub~A$6a}ot#0V1g~E(u3z;7n6?``LWJ0StCA(e~a6vYNjV!#AoRT#7a`++d+OSf0m(a2JoIi zfG64b9r9bz$2)PxY{e~XF`k%R{qP>CLk`z}BF1}rQk?i0`$t@|;l3~L z--&MKhk7laLD|v+ZrVj2e#AN(s-MT~-5r?t`B@%}Detmzjx0tnXO8O}|#Ct5) z2_HngTRk=*NS{IHF=hP7auO^qZXqmDN1+a*g=V(?c8@h8(xY_7Y+gcTw3^)P(HDk% zosKo|V#y6hJ~Wr8P=hHLPNJI#k9;GajAA>ZeAK@b3>OLNNW^aPf4Qh7&N4UESH^$8PS2~w`es2BVrUf}=AHNZz7`@B~2Sbsivd1mX+ z|I_}FX?J<7A!X{k&GgTjf2{32zn4?Fu=WyK6u;MosK3Hi^7}huc@B>)AgNGfl0C9`l=6PZ4fQ zQ{@D@@Tit~t~vDve>eHckaz0W>%nIN!%IjI>(LPcGf6Qme^hs3S(PEGC;EN61A~}4 z5xcaTuC~ZSS(ahmrNngPX2kPi4U`T2r6%Kypgxu!xoapH@th^<<8S1RaUK4}) zFVx4UxYdYj>2Xgh&T>PA-6N0keAGkm!!P)|j2~%Fb%a4s{-mAX+}mFP*)& zH@J@n9w(Vdf8~9B-2+L( zW#sOsFALK$sygZ-t}IcX1<~J66vtY5ZhdKlGL$ZNDK#abN+2P^bCjja$)^)qjQSbY z)>REhyN8n5^Xl?WvATJSljbHQvydoYKv3{PYU?lze|ZOAp_K!Jf zV@{2Jf5059-aS8aNZi$@ghUiej0ckGXO}pJ?o20_4lThs*3QyH-2q~}Vqw}GeN&p} z;8)YjJh9k#LR^qoJ-(wKm19)9JL%BW8=OxJ&jSi_fpHJr_R$N9ydE=Q<_)Fo2 zH6TwiBdRFeq+EFZZY@m9pej7Z%cH%Ng!jB$CuM}3Lo~@b*V=b~>4;R*ged8SwB?~{ ze$kGXK0e_tDGsEX4Z+k2_<$hY-!Hupb#PX`?BsRp`^ 
z?`^&x>yrA6BQBpQzVKDs6DXd2g;@?K_)O2SR&(!N2S z?t3uBE&$0^x#g~oyh?nBq}^vcj*{JMY4-@oT&UJq{}T~zelc5jCO_N??RH#Of2Ig> zSBAQI5~TfHk*VYM^256nCUvuB#oN}mHE)7bHM^pgw@Y~$M}F;0utD#G=90-5GFOeW zPu`>q>5ZG!Di3NMV)3BGc_U7D*Nr$`S0TP`J|CuZtg{tIK7o68`Hy&8KjvcMf5azj z-QaK@d;d!=cw1fvd-W2h$fb3#f3uPqVA6y}~me}FrgUBK3% zMnR2n3L0gHGG?(eFtJ;u-M-HorM^RHwziLW=uv(qusw0+)rVS?o(8X%q&>;P8HTExMZa@p_l7oK^jhb z^1QA~0t#^y%gG(OWt{`DCcfpx+&8^9@-<{9{@-J4@4$!slZ&y}4>}RK1#K&b^4 z$AFBoajdZ`UdQqDf0@^35rJtg5=^p97u1tmLA%#^PKq zlwR;vA^G>OPf5+jP1+ZhKuULJ>q5BV{fv@Cltl5Iny!0Fdl6p+S=!I$yo#l)%zB-5 z&#qDlai*6)d;3F2%Nc4M6GVOj&=g#tqM-kC{>yzO-MQ+=YF&Vm> zbZI=$GoMnke~(fzgo8<3RR&%%tG!wplFSy^|7bCD;IYJgGXOb2#=p1u60Hurpi_FR zAFy$PWwAfxD6ub3P-P>h$2jjldtHBqmH#bAVr%KI&U~^TjQvoM5OcoJB*>#ba_X0q zRNCi#@xYV__*Or=#sqUOTk)n1o|lMBo;^q;-|vfSrnE~c{eSG~={iu^txDWyRy~EO zZaiq^f>$!&Dq~)fxy;rfSx!;h!xq#`1y|f-P`osmxrtq;CTY8r$|$dfo*5x{f&_MQ zL5+BHsZ4R7q3O*`ZXX}Nsd4!MPZ5HDjd6IJ=Nz1e{}Y}9Yn{&Wc5ux5?bY&cxUP(} z(K!x>=_2NRTYvb=*_s+2EgA=Lbo5cnSim%e`{sAdoaz>y<`=v!&v9`g*tZWST-=D` zl9RggJmljt{>wk`Z#MoaCKJrF;C}P~w1HSd__-G!0oERje9=SvJD0-2b~;-x)e#$P z`4skJT(-PEaVn0sF1&A+5y+)x!0Z_lxVPCw|3Szb_Qs%hK1H6D5R8v%%}-r(1F5yY-te zS;&=*lk^tHuSdKZ`Hr=y9Ldddw?FlZgXcpr0Bnp4-(Uo;zlx<%8iP<_G;bl;;u4 ze}Cc`#h8eVCv8Rj@WPY$!2^E|2|5FB+*7-!1v9gSGHC$X-DWvoqhVmm5Z-xAL4L#c zHG?h06q-4Ip3x-B-9#JBwDIdKK$~hs2PYVLgz>zHfTJEtEN?A{gC2T4^F>jwoX`7N z?ZJ%xzD)g!oeJCBt6UV50ZSCPLPkU3mXSbpYAqGNSM}H6^n)#H4anENz$p`6g1{`Ik2SxD9$0xQU=&%B2|3rfK^anudW=KGf@nL>mULmB4-7wKk_> zuCjF>2~uP7%c=4E%%_@%po=-}c}hUD!08IEr$SVUEEUfs?Y1H@h=a-yGk=G=fvP}2 zGP_`;jLEXN3HqsraWANg%f$-o&&`it`)II!c?lHj#xm{d_Ah*I#W@E*JK4^^VYaP* zI>)mCQ!?AtMZZE$WOhTMXed(NG>xmLB-_r6NU{tz4WXBB{(S!_RyiT0f(P!tFD6~{ zAx%-_+jT;!T{W*OnOge<@qg9g4jyKpto-nMl%L>mme^T7^b)ZwDE~jT=-K6H)1J;o z^UnEH<&o=cYZ~Mjswsum?+f(jJVt1aHU* zNBZoU8j+;*Zk-Z)e%w8t*EPX?kjzjYh|jdEsR26T^2-sIfAX78d}^a#RF{YP8gPEj zi92wt?ThPq&FNZ66*%iPXv1A?E|vdQxG;^xP^`SZK^;l%si=fgLRab`zA}8OpB@@QHqH&`%>MH#a?2FGZDI$9L#^Yr(MXnGq zT=Hrbf^J`;Yk!i@@mz+~RpGGHL89MfI{R3qvk_OjnCvmW{$aqzDLs9t2N~BF>eQ1i z811ezzS$qFNR9d%*Wg#p2ikW&si!?zmM@rZWzaFYl$q$KUi%KNOJqzw6!8b3B$Q!M*6-z9?18Lv2%X)Fd1AkwE?hwc5<8#yTxrscFlm64T z{U0$~KRDPEciI8V=@(woE?J*ltP-BGH_RJK5s<%Qvb?3*h_o??$%=;Ahnr*{OOgp+ zPdTBAd<{kiS$7*=&z{Ju;4CR+w1PiyhDZPs)tFl*W%Js!vE$EMQd@%*|UtW{P1rfZJz zO?WP*3lPbYzpecg9t6nJg=rF7ev*W(AbV8G$ti<1Rovy7Y&gc{`|dv47hlhNXk}iM zOiu%**S9&HNzmS}<#xK{UOrC=@)4DZt3|@})qlJ7nUh4PM3tIzLGa*`#H>6uyY>ku zykD>4tF8;))rWM6FPiQXcItbsO7uV0lf9xV^qWSWEhLt+`N%U|{qcOMGd#^t3|Ri0 zF>IE{YH0r4AYR9GGvMCQN!Ou1ou&eXCj22DFffg&{dYxF7lTA zRbb9>;E1hgVDq`4eIT=-eI$-U$$!3-WL3@X_mM{2YvX@jLKLZo6Fj!~$ew}9OCayF=zF=6|u^wYP zKlS8CTm5Iu=~%<_&$GfTPbtQL5!ZOKO%8Zg)Fb^VmPKBa#qrD*Ngung${#9|5Ic@C zb}61iGIbZZj7idL*30RoZ!)rYzO5G9%#VgCrCxdE5<{N0%{THWM}J%3VfAS|ozV^o z5A^*Ps4fngY3jKzYgezI3&fC`^xES6oYLV*+skE=x(VlSF}b_huYqiaJE%zN=j%tg zoeKE)njtqc+lH}+uAGK+GAUSWyl=#h=Jx#2>9wm5e!(fGf2=J%VA(Aenrrumt3Ln7 zzHiTJJ=CK*!NC9EhkrTPsyWjlKgD3Wo03)8K&q^D2f0R`UJls5oD$={f$F$RipV3v zqckW)+6v5_f=PIx8*-zbFXoWlaZ^r$I{Qi!U|^8vE)fPX zby@m=_2sI}J;Ya~Q2Nz{NJ$$)Q{GbG{-&20JJG&=jF*X7XMa^uZa8w$VV$8sEZ1#t z)dx$&RPqI$>w9sqk3zJMl4bYB&h7Po^|fqG-8moi>i6?(El%>lOD&w=&g?$g^|MPa zu}RcNZ)Gt*g^V-m6;R|^T5`+9uee8*`I_JBB}Lqyp2mnTpt?;{S=J(rJy*2}r=CTKzR&43o_|4j=A->G%(x*{ZRkVUx>Xl4r8C-R(jGQ|HCieogVTP^hiNf)coQ-) z`fdtZS-BFDqso9UwjmlnbVK z4eRbDVDcgw+_!}mZJ=kAYq1SI(mZVo#DSu0eqkq5@55&wCU|L%@wp!j53uGfYf&^^tJukwRlG`Nv*p z2d|&CF=Ql49ha+V6cOB`@Eosfne5?3DnV8zBhN(Ni={KxZo%D3RM}TxoCIEXH?D5i zq{{ICPJc=Wzsc$Bg{y+jj8OJ0`pI%4jcZVjV+BBt=bI$aBzTDHx}n<0(8>L4PtucQy z{XHy5hukF#~diQ)9yEv4wWk0z?WfbwMYmr_?6B%*(fj5o0eP` 
zB2)DZRZXxrZ<2nS=*fIX7}&p7R~uG|`Kz8iu^Xs!xzF%Q%JlX$bPTqXNgf2AJRI3^PbH?`oxKKKqWae5yc zyxxuPzosBebdAV>!N34}142bLkQvnmo}m{YwVVK%kuQJ;s5_XeIKk`hc#h7j1%F}W z7T;gs_geu%$$Mae-WjOiC4(3O0VG2{%NaB!NHaIUBwL_7H5vGb&t(ne3MzPTAOx=m zDkuf0;88$e{C8j_xmFNjC4mHY4MdXH#l!*KfCT;yv|t+zIqWpJ%HIJMM&0LaG-8H9k(A8WV=T>v%S45%WGh7D{Ah`{d;&$ES<00{vA<1{hw z4Z99p<9`8l$S3yaDFdO!FCe%d!9A=x@Qt{|`S?_zpu*=5=lFc3hBdSua7hz<0nF&q z;CLtqjhv0&Ck)mFgy14FooF0CE00k;846hGML4;8og_wW#?Sxx4@E$bZ}1dYR9zUt z2kHSTp>63mHhBV&$YJ@nfdmd51b>vBfg0A}x>HbKMiA$Q6TMoY9L5?W`+SITZdnI# z(V;J$jq>ZJ208^a$&hdvV8Bs`$1wd0xzCp56~dKdIZ zC%O-BuyeA)@p9Advm<5KDV~SJmGS7%LzR6*8(2pXXXEv@>y#R=4?|B9dw;!*BJPdX zovu@*z3$gTPl>($97Q~^*J;<`I3MS~gSAdClR^ye)q(qn`v5g-RWL04b}k5_$94QRpCoMR>~rnmk)YkP1iW zWCL>?4~A>%g;3#99GgpzXnz8PgKEOCh0qMa%S5QZ7)Sl(+jaRLMyQLy-+E&p9t|AIE!OQ>L z{|G=`anvu@5uM&I=zo=&Q==C+&i`?rKp0?*bH$3i{_I$TIuzVG(@{-)Jh4xGpiX)T z>!#c$LtF{Bs!^wPz_;o{OhNJ7r8uu`*8!|LV)t8a_rz@cPNxj04~W$8xvT5xR#2GL zTl~H5&+nx0l?<^juF$q9w(5gR;Gv<8{16cqt!ms?lT{?SfPYMTw~_}sdst=hAscK`mHjN#rq9POBd6`tC5kh5Zys8s}9UGa^5@8X`eGVt=3u}`J8 zHb)(M#Zqs?EriN>Fi@`yZ4S?|Lit^@zv|m7*pAOJH>5$`&ObWHP+y~6LH{N*gs!X6 z&LNGs1%`joc7O5)S*?D1o(gr*EQ$c!54cC&g6oc?g^rH%xi=^`cjs+7Xt%1yNM^B3 zZt>aTYEG}rq8?CIjXT7&1G&R}Wt=O|fSVLi4KuVg@$;W+IJVgi_pD=EQm+h2^I+VU z8Vg7%uH^P8TmQb!<(C*hFGM@VeLvd39EJXkL?_62awrzPf>(*3Ws*XFaQu2Ul$Z#yW{J07(g99 zukrQa^Rd0a*XyzU_3;uLpb%fCSgEr?FoIxE<3s`y49)>Dd3V-8ah6f_6W8!+!^~-)7-00sL;6qTHE}j&p23jB6CX^JNsy zlP}R8KDB>Dn|OI@e|cz6e%cp!|GRMb(H)Ogww;fTTRkwf&y;={g>vIJWBXe|vF#Ju zl*?1w+bH7Xr>((fIkkPZ&pth0GZ*&W^_Nk$ujs46MR-dHw%2rQ-*NE~Z78w#K7Y0q z0`L9P7N2=}YRm0?hqG}M8Qyzx9G428o6Qd6LjAG*VN8U^d!O3Y_}t4=+t5D8scqMF zx*Pq@2t4mraL}=fu^HU6R+;$dpxSjHo6N6&Y{%3Aj8zR7^H7{@cP|Exd)_te0Ul|$ zhY~crcM5#g%>~X4WS4HIld= z9eyuEoXrZzGjIprJK$-E)F1ba@cD8aA6LV@Au+yxM7|p8Kx)6oVFTn1`y%acL&99v|j{O~< zs~n%pmDs;G{CuOy7JvUnD6t54!uNDK-k%wSDiD zF8=adA2^N$j-&Ty`$*&S2v{4q!10U#Lj2ytVcrFh^e{el{vL52J$&9XxuO6|oHx%I zILw^A$`LpZKKG*m>m-v1S z<8xYh^7QyTu0(m1_BjnPE{1?Gzt6DFpDxkA|3LVE=Th_s zCbZe~&%Y>Q*y5jh-CGd;sV}O9-#@*g{+Xk>vpn;ctB0}tE!d1ButF_(jAOH+Erg7d z|1EODIDa!M{dq4b`=6&&$vD&>n1*p>R=)p0`K(e4(;qn17FNdHS&b>+Z0v7c5N_5l znktkIltS|H7x+!Iy@`5!(h7NB*z)kAB9TN@kQqiN&k=jG5pW9^8H8u{x$D-@@L%t_x{Q6 zkB7DX?LU82^ERf(fBDbHc9zGxl>hnP{?~u6=IQuvRhpOo^Z&e!?eP9XO}#SuA8tDB zZn?G9zyEV?Ki;AI$MjcZ_}9o?wsmv-gM^@e-?#i9=QjM8B0bW7{7r)QTvf=wXX#!N z*nce`TGIb7Im-bU!~Bz9W@4^p7>=&~|0MbUvyFd7hN7b;)J-%j(SudQvD!(w#E~$e_(3hOSoRkO!v{pMWTxV3I3CKg=ou zJ@X|ojxXs^*$_}g)hhY$X(Kv4##hK_8GFLl=r#h*q{BW2wq+sb5m@q^6ver$%nL3W= zO!oVTjb>W7f?wdIF1;qMG1J7CByn79bh6*LNNg?#Hk=hy7i=9zgc0vc8o1ShgJBhc z+b;4~fjW`t+DGd%W?553TxE(L)>yiwyVsb1!Pj|F;5DJ!e1p}AHg|x67G(ixw9zEd z!s>Kg=Xsjph>#GHGJnR}{qi*ugUE!Oo%6cn0)zF&O!sHA!)o6)x#=J|9Dw+>NA`Ij zkP%>gc8T6gb-!dL!sE=vbuZc4xpX&4T5 z0)MqVI`HByt+OaNy%IO%KqT-9ALWfqkJh~Z4uXP4+9!!X?-qtu# z)oIgn#y8nRuK?WXSAmf%GATqeAR_})2N@#BQf2i*0^z?(^^~!YcRL=6%zAW1Jo*ne z=2{nAl?=5U8s?3u5=JiZc4Yfk*c_TdBO6I$d0Ay?2>#LeKx1BjetrIvW73X&y)@vz zy_a|!4t;-IX4yM_RdfF{L33yeB%!fr6}yy+kMfO!!`TI3AD+WE|GsaK<~(Y+?0 zcqd831xtC{vaO`DkP=KgT+{?LOGH8@Ic)ONlc9cUGRw0OV(DU>K9Rrx<&KWUSQnd} z*W!K=Xm~R42NYN6 zSAPT(+ti*2$r3%|IK}r|-Jd)TjlgvW^jp(G=)7l*DJ|f%9a# z5r#{*5LJ}Tq$28`mt6Q}#d$socVnFz?uRPhQ#@HX`)M%c@3%BxH!mWSpKIG7V$`2U51xOk zm!x3_)8hWZ;T{&t&me{w#oafhz~c#9zuAbV@#PHe8-B^JfC+18S6_2(YMS(}Y1g=| zG$3AJb9QS;-ure7p7Y+@Wmx*6h3ri%?xuP~?**K4wZ|*v&)QyCo-A)ig=Hp4w&H^u@UC>8AqkSFt=u^8EjYZ2dD^Y7z z7;-Dr`u%xs*8xvOSTBuS?78X?u}o>qOpP{X9+**^IpZv3xLZ0;A!#0kq0H}38+>Li zSKiSzzp|6g4rW_!>P1HeJp+H1&SzpbR?4T~VLwLNiMC>G&V3D(BexZE#qpPt-ZL7quoMBc7H`}(lFlUy#Z!CzNN4nM>h?l!N%A1E*Wgefvk1TN4>m#|r1G;kpRH2XirJq? 
zNv-57r~NZ-yrS0a1EVZ-QoP6UskkmeJ4w@TNJ-{Bxq3T>F6?v>xRZ+gS&( z*WZFk?u_L4LpCay9tMBQ%Z*+RxA>2#52s4rD{6xQrV`vpxq#d%(fF(Qxht4GI$BlG z(Ng$k?p6xfp`;()MaYPc;w%g9tg-THj@qXx_rVs-8?hRjY)Afv5ic}GA}7XAc35d? z%|n8(OiK@S*+fd}$&E<$w#Fc;MHCd zFzP$4xc%fdz2^XR*P;%C4LJu>z=8NCu(YmpH&E*=of)l?j`r)ZicCQgCz_Q?KYE6; zX<80y0vglYGgW7dWtmhJGka$qdyWaNx|9IFgBuObF9dAR^?E$e01qXCn4I@sA$E34 zdYz+@iz^EH;e~&0I2)zA?dV$p_uRn^IqH}f7L6FQzS+F!f7+{vhz&)QjJhUwzQz$~ zKg>*ShAT8?KRWO8OXg{_8^L1}vV?=FY*M0r=hLdAna?~7`|MmT0OR6RJ|3?DG{itN zS%bc{O+MVVZMohor^x3polcyq|7>hco?j?tY;L67O=^G8Hyn1rO2-ouc~~7ab4OG8 zuBb*qqd9y=d5GNH<-D)#xpjKBfakmC=1kc<8K3c zULowvZAKt&;o&uU6xgR50oEM*C7#`5r|8p0oWPOkmKHCN=)8M* zPe;wZTBP;z=WLW$+;ZOm?e`+K4o7Gy&MN>C-Ee;|_h+6J z6p|t=+Phv-$Ctn^k*>>O^Pw3OE53xp=ObH9{oFX32Y&PpuRjtWp|TeMUJ=vp1WPjC zsCd9bLGqEGT^y!fCayqBhti$mXL7sFPAt_*%~lN^1AKd$2q8ZE6 zS?g^+di^r-CY1t2Ax4A?gf2WG^vQqD7Yf^y(i+8&BK;VL=+XI_r<7@FB+rIA zN)Nx4_4P@VshD23(kULzE1rS!v6Pl4Hi*7DiA5icEb{nGT?p z0L(yHyPZ}G3YrQc)Y84GoWkzt)>DqqSFGtzqaBz-0&|e;DHKICK8GU7G`r;*Q!P5n z)*-SX%tUY8Mpb3ls$N=H;f=)&nk;{=FS(jn0K)r5-jeb$2o=A5>>;n6VveeLnsU@R zo0t`uEx@Eyej8)gaExCIyB5w`D=D6@2OyQ;9Rmgn=7cSM`nO?D@TTiBh1tGbR2pVIG_t+E`Wt&$K@lyprSDV;q`x_Ie6}+ za2?Km+Cq<~3Jn{Z)+h-SqAv@Zb@|yVTVtplYcD>-A*Pr&T$E+4R3#7kgk)l#OU7O| z&X-*zFzAYj4@6FwO2<7_Y)yh2e5UYoIcjdbY~K0~sv7_Wa4O;Z&%zJ3tRTJUK`p$AAWbTIzsNyg*q>OR06svdEG z9-~L8FxZ^j&YCek6xXguV?1Vt@43Z(L;KmUm@S)6(;D&y)b=N@_Dwg{uIq6eH5jUc zyY252Svv=DxI>U2QZEpB3`72lqh8h%22b%6ILWFCVB;m8=kA07D_(!+*LQp_&Aa6+q(-+`7ihLSL3*)KZBwpz2Q?70T(#8F9&&ZjS`Ps}^20x4pa8b16c;3h37=-MvanUN! z<|8`JJ#H`8N_SOTA@g(8szY$jy9#Tt-_Kp9VX~@TyJlUS3KW0oKkXsy`kP{IS@LdM zqGMJ!8FsE58U-)xRMJ6kQZ$a`N%}aX?poptGXwh2rXVEo^tXK}CRF<3+pLbqAE5ke zSF0ND`q@MNf)o@ex*gKH$x>We0K{P?XY^-##1&wxXUFyH=p#4KF8h@=Ql_`P`r=tC zXhI+mb6%Bnc0+$wz-}wSR-(Yc^n2}Jp6-)A+4ifkC^F{W(ib2*k<&x%vaRE8&^_zY znzquSNq`dtfdqH3LKKquaj=3foV!ZEuXh-~{@~Hn%+hJnjk!jl6}O_vQ8B_8Y31D!Nhj4h#dTra6 zGZ`?Ln^52I(Btqz!n?^#$CW?^TKx5c49ZsJq>VLS{wd3{($B8oZKL!tyRUE%;kk7< zqQm4=L2Oud!bQ@YnvKb!I0Ij()walfo5VBZ+%q7~mC7;PUQ{0-LUR~#FVD;+I@ z4N}Y>Y^i@-3T2(N8bJ?mFEkxD@z#i+6#T)lTJH#NAnEgiaJ~ij5 zP?atdC?JTJy|NsRcA+K<>2-d;I&xA@lAYcOox5-TU3D8( zV+ehHhYTQyqo7CGV%QBS-O}@9BQ{wDDR`b;@S}fs>OO2TY4!Lf*4*swX9uF+ahT(T zXOa(wM>nptC;Kw}v*X8kGaAN7q>m3BuewX8egwX!JFeg7QX&A-4+sV{^`f=bRTU-X zIafX+C@D%ty`vp_V?GrW8`bE*Y8IXjUQjrLOH)|^51g969z&{TG6#Y-?-$AoD$iT! 
zi1>dQEnnu&8t*E+<88Eic4wGbTWci*2uNl*a*}0eX%0`iweLH(4+RdVbJRm=Hto41 z-;sU|d@)C|ZLhWZyj|9?S306IQT|nysdX2o3YY9PR-9!z#&NE^1xjRMoj!F+1&3i3 zg^o2bk#q71q9K~5PvN%%esZjR@!nzSIc|SWZ6R!FBzxgbGi>Mq8w`uwtMl-12$6j+ zJ%smMX1_n~NsWzf3ko=OtU{m|z30Xmnr~tn;Wi zZd>dA#xR8QB9u|<8652Wju>aWst11%*Kz3xw>QY&u74C#mKB-%_K#u^3xFrx?b~?_ zO*F;ZQdm*6cv-NUGWt|I1iBI=(mz(d_pIH=zI}aX){QG6rNx zm%Q-neyfh8S9RnW5EdP|b=Ce%P5wb794dUoCK=Xb+x|`$bRED?r(uV!fXfFk*)zh&-f1 zra{DkoxP5E2a>V(H&Utm;3a>Ya1=@N{0P9yWJupG{|J)AVq-C5v%Z}%65&_oj_xg` zjblx+490fYBiL<5J8qn}JFGm70(%P}iFYn${YeQuCT0!{AD>dxGD0yv3>Y7Yf{RTG zfP7Z$4WL>_Qk0zvUTj!{O~r|QfWP?}X^B=P)?Nj?2}0^wuJN*Xh6sOW79Qx>Fnl$u z36%@BIaTJo-9oZx(VRJfKZJ%d{EXL9v~OH!mKF9Fpwn0aA!5p4(Ld(aPZ^QPD9vyD zGI2Oh+QK_G{k;WVa$FJuHzd24&95%!jT76bH}vM<06u;VsT*yAlYPP=?oCpr=qVJr zL2cL4El`T;^s5K7R3U$6qc+z&-io~tBx=R}qP-7fMN}E&UPXJEfaF}cf^UqEX5-Qo z#zgJ_wbu$VB=}wCi@7b3`5w4{J>nkSgAz9RYhYY_gT}!Wsz5K7-ewUg6eMMr8cA1I zDLL5DR-rm?M~~tA1@oo=uX|fQ&eBI|>f@SPf2~|n6u|$G`uu;j^GX27wn1=$kp4b7 z7*I5E^u9Fb|1i<7QwD@yLR}^3TizT`e3V{-FyYktLTw1~V+`Ne1a`;8F`+*Dvl~rQ zSD>;VQ5uFMyeq-H*SBw%aP(X?GA4#3Do$3@txX zTAlO4$!GRK2-8La=;~+B8!}1XT=0B|dP)z}=biNgdTD%r1-?SPP(n%qNeAjwzj<@h`6_>^n2Fav#0O9-{ZHd>`jmb#V9Fu|BCV z-mVjr>It`H(`Tz}a2k1tVBM)1>O*ntCzWZPWyk&%vC2@K7aI zuHk(EC^7d*`gtO>5*AnpL>Ey4D2?jB3+kcO-Iu55i?@+XTKfewNnC1_hS{2S=8LRz zG8|yH0bhKnQFoR*prS#S6S9H?ZLjfU49Fl}7`)IT7`P=wUj%wl-k|RcgS+NL<%&@` zf)#(ob94%~QS3qKHHMF$mFA5$1B^})MSxn0A+n_ej+s6^sX={}Ar~v&MhNT6V*a{X zTBK;l&!zR?U$Ydsl^%I8q3t?W)hA>6pjCaH8+{O3CWIn0qG_Ax8Y@jijy?wIcdEW1 zYN&WEIct`HR0~?ovgA^>nGg7`{$~LMiN8m40P8;o3*5zItU0o8z1iC}-P98#9WR zuP3z=!D0G$?{}~==v3^X>36SP&n2bQLQ1(BX}*^vlSwYyo_wT9k;Yd;s6qmY-S!ZWbnNT%Acd9CSRsc?(pyMx z*U8Yx34NtI5Gtaz>$!uqXl?>6Xuo+Fcktp!_iT{0^0v%Yq^vHulvLiWB zN`u1SknsGHt@m>@-zQ!zem1d;uQ45}(} znkh|l!1uY^^C!_WFwF6p^@D%Go12gNb;ST7k%yQz@w2+9B`i`z=@az9vLRo8Ng2!5 z13=IAjbIYk`MbfUDJmatE`enYjX$6gF_XMkGsD1mV3BtKRB9=K=HmX?RsIejm^n`h zh3ipHg;C9%N4_u#ZZY$%Ez2u46H<{&>5mRK%X?3}h_JlzKHxQkycU1=@8!2NVgAdQJolm%RIF(?ND{(hdkqOba;sm?{Fv& zoQkR7e)K0zz~cy$t6mQ+d?w`d&=?v9<&g-X&x=kDP83K{2kfVZK%RgX-e1P$H1N=` zMvSf_Uxt@xJs2(SVSG|F3(wnZA_{Q!{+S+mAr%ssSEpz|DSm%1Po%-gBa5G)1VdJV z-cA_V*&+5K`SVKO73AO1CHsRnobi3o3LiV>b=6l zI_BFzO-MAMp9#ywhXq*Ybo(Nu?++BqZ_ktQyNSE=Pr?8@yk*z~XgaYFS@EoK=mw!I zdIOcE3G_#UF6@6~IN{!?o1Ie%XYmizlz>r{yx?YS6W*Yk$aqcgRCV|h(ZKVIRZpwv zQrf;+P8O`AVEiCsyZUJ@*fIhu@lZ~BLs*s(5R6m`WV+zO2$K9VkWr)7s${3RxXx~U1a?gXgBSfc)sW5YS6TByEcw(NGp@Gbbd{kJ z0rm2kCxU+>=K@%9zaJ+zkh&upv6hC?vX+Qi@-Pr{t4w2}VY`Uj`u?U8Pw2sYk!A3W z_ywec-7cWTf@{mhpshQZNc~RZaw=sI&_%b1=D9Ryn8}e_B08suBT)z1pGfcl8!>?Gh# z`wD;lL57Ky!$!zd_#{O*17y`gERP7u>gW8zn0dfPzp8#xuXpaYRA}PmnE7x^FtpPAG1+9l-12`+M={0c*v~P2)AByBXtVGwBXsofus+c7 z>qsU1Xiw;D8n#rl$8i9(AHTMMVkM(%?59XIiksV zd&5DN@_4?2BojRrgUg^EnmKQUH~wqhA(DE!o8d%;sfm^oAw*@6y+3HNkXkip6vh}NrTtMp6b9J*VO9s0A_)O_SP7y0 zu8S_Q|CY9OY7g`@!Jk;@ol2n$!ZH+sDe*BRBMZl56U}lzd)E8lUhsT!{DP3F?T|e2 z+%5KyvK^2_k<6JVXTKtj!d&f}&;oz=lz`bJN|Yn=9;eQx$?5=>8l!9f-3y)Jm%hJ5 z{0R;~6qhp1A^)-Bo1SGqfzVfYmUE1bE%z%)7@boL6IfsICjY?iWPoDe1GrD0;oUT1 z{FFjtAJBtEPW89&$Ihgu|BRoGr_h~M1cYUTZ5E0xczK9X<&Jvy)ue@ISyX=$P zaXumlW8UlYJeVTV_KzHQ!rxG(n`9JEoqnOWCE8FFlxJ!BmMBnyIS9ScIQI(hZXc=< z){Dvm9h_olj2{p+`bp7;gRJSPAO<&{)K*ygX28KzegoR&U7I^-31k{CU4ZQA!Kwio zB2u0k+?Xf&y|LIMIHP+yG^~HtWRTRcxN|oC+xyz?tlDy~w!o!^so)_S{f=Iftu@-h zomABtRH5YoIpUWz&H*P;qCPNK5YCCru}QRjo{N1|okwgc4@_Bg$U8+U&{F4WXf5RL z(8kV|V4xWC8{Rb2Tcv7dUB5ey7q8PV$8EjF-t@OugjR1Oya19@B*=eRYAP0oW|7Ae zm-W~rC zH>GAZi+M#avaJ#bOTvH3co1`rTCyz4pVDBsdRjTFpiTVAocqqIUD!+rH!(jQ5iF z*Fz&o7(7Hm1OGI!#`Z#90GqNvF!?;s*zv}rP_7q{GDOss9B)JGK#(uD$}(W|(j+US 
zx_XWY#S~yS)GdTRhm7Tr6c^ZV_2ZyCr2^4!9!9`YjG)XqUOCX9Hv_F(uXDOU`ge=v zUskvKIu0xYf0s&)clw&b4-~V!XotGJUAxlR3&4Mbw&)juL1TtL0QXVzMLiuevzo{N zJ3z$0b{Dm=WCCw|5=N4k=Wn~*jzbc@xn&(dCyW|DSKVWtAt_~RtBST#OHq78tGZT{ z5@b?9j$baI8BXn9b!mDSRr*+H|f|qJgFwF^Fj9Z-y*DbmBWY)Ru zyWn(M;`w$@Dt}W^TY?E-j~zMf^qCnM;>+E?6G8;X;lkHmYIgh9bWqH7&^6UhR`-@A zx_rlq_2R@`IGYl=W~zs^&fB`osn>aA0eGe|HQZI-KL*3Ot2gynG0EUnYBFDxtd}`5 z3Q|tS%Y~VrLKLRQPn|z^_(h-3A$EqDVxD}_^MobbOn;~oM%0y2bL#pa#lv+~y41P_ zqj22cZ^s)wn}Q^fH*imsWzp|Dq4Xx>4i@6jROibSR`5(MeSR&VEHL8NR{Lv0Np7Fu zb#uE3YW4IvwPrZcVKF5Qo zx;2&qElx@h@}60JuM9pTE}D)$Pxo1JS-Bha!Jn9)xa&3ArOQ0QDYt7Xsh6kBmuxe) zxPKaI_#}jb1+bu`FanY~AxXguTYKmOa{kpv0iLFJxrCVD_KGZZ82pp~PRCd3yjygF zf|5j7)2Zt{lE`2OXMWA<3zR#HlSm32Y-sJek&jI=JM^YE=RsTdIrq1OQ&py=o?jnC zJS@v~K+R6Oq|EaQF1d#+(G0_+LIp+BOMhzqheq4amxQE{81^}Oz?R|H-bO#EV}^i- z08?N_Y@=cl?FT*V*`dcfP}?7;)=8!Jco)JUCCyw|aY%KdD4Wro{rDLas(dDiDp$`cVl!(F(BtFB`;zTlJ#Q)`*UAx2k)3Iv*nhUf zOi4S`KIp3o=X8><_7NOl)c6C=%u`V|H`z{$qi3aY4(Fl@X0hB7F0x9lT!S$1^!r{U zH}dW`=HL1|mfyl0?af*`W)?1?tuW$iTyBk{-i&0m!tP-la%J`st&J8n1|<)Y{U6i4 z6A(KcN@pXa8_if4sJLRNu1HS}e}6g#+@?W#1mIzu9gJj&y4iqZt3ko8DYHj;gv9dERB)yV55ka-ylnf%5aYy!aM(I}_hDL=dHE8b zP(KW5VX*a1@|Vl%7LyhfnKlSeY=iDFq@I$y53cw`D7woF6esK#bpi0rjej?7&@j4y zF_^DhjQl#x)EePoTp=-SdGF*W5N&_>d96^n`p3L_X2?C)DuWNb=VO~9mq!QeEYKLq z0~cicOy^POR<+96l_dNH2ra>?_+#A8c_#$w5H7)Lgi|yd(&kHSckV|4{m}a6&`I9Z zUl)}$i5@k`*E}b4rC@W2!VCZ8TBBSBP#Eri)0Y%O4k~~BK7Z>s{`Wb6!P^cdlTl|C z`Q}Yv^GWq|fHiUqU-_o_oQfqXK}*@g1+xoS`A{aL9F1 zY+J$x4=xP2fS~gdpet3vLZ!0XC?sPr|1AgjhwcB5|M|cEPniGz&+@zf#q#s5@?R`} zOg8@+X83Q_aDlE|F(8GDe~ZH^G~S8 zFx+7LN!L)m{|LLB(@6ZYH*ML!wf)VQ6))7W2~cL=^zZ)h{=bz@e;ZvL??#g< zb2Bd{DkQps?T`Fn3yZnq#(Lwjs2;*YA_;#X?Rk-YoOdp4$wtXqKyzPOdd6!PjQU`| zk3S_>OTYVr2g2gxV-KL;c?XC91H47I@MvMA`??F;AQw{fNCoR+G=#2%^( zm(Y9hj2o3;F4|{)zvgGpAdnB5}Jgv!lO2a}#HT0VK8wc*BynR8o zm`jHf?5SpO<{)P8tXOcg9yf^S+nCHUXLnbWO>d(h4&)GcF8-MvqOkO$ZsGX7kyJ3! z9&jo3u17e7y_^Iv*NhRFLQDBm7q~|rx6->0h`q73-P91}&V;~vq2wD*u_1q{gs(i8 z>PKl=nTXfp#Nq?iX^sBeIV=gn{}CLK_b`*z5JAw^!s%3Jxq+tm-23xa>8Q@T0wZlg zTCSF-J9(4DS?cPE3|%JqYH}t;x+OBCiI|-1fyJB~<9BCcgUph*L93gm8pU))p1ACO z1ehO4E<$KBY0ry9c&d!{<_UlIJ|RnbPx|>Cxt4mm7@o$-aq9JGy|)^3nA_*ft>8)r zUFl3gp8U3K{nAw|0aK<4y*zV15;IxLJ^_}UkPW^9X#^wj-#h%|BBJ!=b{eB_?YOlh zP2xgghg0Fz$dXZ{b$hxEN-IT8(`O)Xi7WO&!mX z-+jdm=fef|WDuLcjdYw|>vc zXQypxr?coXuggb+hr$|C&({(G8X=YXhhlPI+M_?r#>%$8f7AqvUgs*PX7=QU?bh>;#&&_Tg=n{kVSE2`Yf1^@x znPFf)Yi|UIzAb>T!-sZMS)Ox^IL6^i)KaKE7THcpPgHW2%pDs#$FB14Wmz`jqSMm@ zZ3hN_2Bk?FcQS*o3G!}&dJ6nqBUsfUCYDD03WD2&9>a7-m zFv6eM1z^}&vO~sZ1ZB=_f)Tj-^-6rR-If0!I;Wf zqzv8&-&%NAL9rx|t1-m%N^2`f{1%lMgYhdbCkvEzls*c|9y23v;f!7e%r+QPposD& z8pbXLeAa+zke}w9N}^GK5-Dr3R|PGqV^NkKL>hz^s zzyt5-`A7ZYqMJW!o4@NK?d8+1mw-ugA2$>zGaRIQfYtnXf7LU<@qiS+Xs1hHO!A`E z;EYSU*}lD27?nO?tZL~m@1&93N+>8e>o3q1TBxjQ8EHGHp8#VfZGyUleb```G6fsv zSTuj|ru)5;Kpnm;)OtR#3>9E>!-^VY-2>3_5|ePu$BTC{x#pIfiaPxs4r@E0fGLP^ zT10UNs>u`3Gj4Es6~jR9Sg0inyI6h1xw*GN+C?#~hH_gQrctR>FsGMbu-!89zVQ2% zR#_IQ>DpZ6?~%$n7r9M$x?blgi_U^3sP%sfny1W;gY-0(UkPE`uLE1@aS)tYV?qzj zg-zqI#p+Y&^sBnMxEb_Uq_*uJiDdnpT!4M7G{ucmCJV8Q*U6aBNs(z!_ND^ur;j;_ z7!a-IJ2U9x0!PZ$%EPAjG*+P6&`qqVK@R&~RhEBdGLZQX z8UA3{#;JX>tsV?nx>yLA@Uk0Q44vzEohC{<#_7G`m}3NiX3oNQ;-1ljvv<@H*#y|! 
zJBpaoUEw550S0-dVeRj}wG}BvmxnWlV6{_gmU&oF3B53u*4*34CZr{Nm~}jXXTNt} zz(8Q^HgoU_{tT`%3P>V&+n9fPEJo8zpL}&3u%DWQ?TW@?XBO5jVu|_Spf=!O_vYW~ zUIj-i>Ys!zB?F7l{$Tx(hLE@l$Euw-N@9>_at2cng`cCF;dG=u5DD_!=oN*w`YfA# z6RY$veMLz9aPZtOY&Iww!NY`}-_y=SuDVT@+$CtihK5Z^8?tML>i2*58}}4*R4WHT z(D#N-yQ69T5aN*9OcIn+2;e}ihS}0z9ijGMl0?~A%AYyH2s{5$h}feSq~zVJ{h!Dj zA$;R6I<_~FwdBuLNTAPxRYJ1N^|Bwc%cP@pWNls1rXvRdx-6^xD2;LZsz{M$WB2`+ zFGH=#J|dn{-7p!_Yvg|!H*x#ufBZyR{%WKg>=9T(P-6G$2jmkch=W3$IEV&2RI`S! zOjfJG)K}3uL*r1=mgl>FpB;;z+UdlPyr*Y%+N~!J{J9?fRhjG2{Wxc&TyGc~+Uf~M+Jtw^aE#c0E95;W4PHxJ7%9%USUlZh( z?dH(Ufm+^=ol$hd21yV?Ofi474AQv08#k=5(7GsAw97*vn7<4vNP(+HT3Aa4R{5;( zEsMSO9zaflaU6$p$itFx907O7X*mK&$pY(M=wOrh!;|cxKvPGpeh*19j+c_h8v1v! z?EPwC)}ve7N4I|gVe2R1EJIr;Y-(M29!-DklQ?}y)OsP>t8tHX@rLi%`Tm5{>yXg= zxsd9*^De22i}KJn=}N}Z24;%Q0mn}4FhSvI5L9ujd+axu7RSj=8MMl!;ECSl-OL{#&2(a( z=+;BQIu0lRi=g@}v0pG-rK_#v4UNM+tmt)2coF;7QGOKog;$c}PUPOHE1|g?Kc}z1 z_6kwAv0r~EaG7!9gP&=*fm1ach=fRc>d6m@L zsXU)OD%UF%RzhJD|BK)RJ$JBLdUzD39=xr~J?MX+9V58uRTWD3SV(liu_0>-t);o&5l}=h#KYOykZLFr_*%*NiX9ls0_B09^$1yw6i0oT`-)T?Y^JhjmBLzk% zlQ4e)5PA@|?&QL-9h<f4My*!s@9u6G`xz78{d zC_EGhy)o%sZ%Y{*%)?PRtwMm(9>eSm+~AYQr5%51?BWgXz_{*u_n0&{@jqFzSm*(`B@v)Nx)iq>w)=NFtbd>5H=^pMA?5J zKy)O3NtEUCS<2SPUyRARw+X~&s{}^Zk4)+k-A@e2P)*#zm*RP#wnjDAW9clKm%)1j z>w?9v1FkJtWSePO+Esy!@~RITMa?GJp(bsdHfh*d5RnJUdlVy?J{uh9>&jTjK18bD zEQax|d?k+YM2+Hcjt(Egac}~CPD*f|>F?eP%wb zUGj}E`bBU#zty%D_v0o`*}7-QLdQg&3Wx_Oye&z`lo{0-s zMV1PM!)C&rqDyGFhzNKqu1l=CuQNc;@&l|2OfG~+(Eu`!(l%Qd`%!;D@ZnU4(lA0G zD0(KXN(5e(Zg2I9VQ8lHIP4)k6+k`7(+nie2gB5OI+y(THnHfq1vLkyVu6s*Z(la! zPHS3Piejq?uav$U_Ql?gcy7 z-g5`8nB6T?9t;t2M#+CSzVmI5?^fH6KQuNrDm)cKPI2Q8EwvEttdlg-?ERY7XPrJU z*97o7aa$I#d1Z{?DSiS8!WW?50lt~&KAp|)( zVwM01(6FZX`9lJhdp*+?Iclldq%;s-4F;*Pn;T-&v?J zf~pJH3T`lENW?_9*yS1lV+sPo3R#25eJxpB@) z0gG502qy;S?!a?~ZIzU&gR&J^xXisSlU&hMKKWVqERGChO`mdr8XM5nf*%}cIx&U? z*$+>ng?E2iO^4o#hLylpO(v8Yu80ljY(bcriP*c0>L4T|t?l7CK^M2D{hA?Z$X@avicp$wToAVgAgdOczAVjYFm)MCir`K8iNWS5hwH zA}bS@-)!74pIyl#ixc7z5*hA5B`dFc6|Q(DifMl`$Kjq%V|$~ArY6d}akSOp%@thY z36>-}-n_tbYQ!w}Jx~mof%B#VeB?&ZmgVowG08S|dN{3Z3JxYjpGwUJ!BM~Ggaf74 zTY#s-=B%ZeaIex|w7r-+=RKf)D^Gh2Pd9>t+=vgfFn5ex@jm{Pu(WSBi?y^c)&`vT z8y$bkmCJfY?R=O7&v88IFOkt7FEdPvu?4s0Go;!c(BKbj8Ey+-sy@NBF`za9C%lS_ zD%uh!pvE))5TR^jXP(FkP;#>X)Ek3C@&q#PJme3V$4Ya#rr2QHPd-I`D!v7V&_b%P z6*rB#*w%CQDJrB@$W?OOk@}E@NZH9yv8aC!Tq`O-fDsibH+piMTHN3fh9qYgWu#*+bK^)kwS(Q*&V!Om~-@L;^p} zyxY>5TGesW@crpw_gXJu1-hnc>)2JTjcYgP$W+aIQ;EuavX~n$8YAovR#OK3r~41U zb{}thm$+i#E+O6!z{#(oT5Knutgr)5LM%1FjNpUW_iO;W_;QdI0h>FJ-zI-Gz%(c@ z)9n>7JU5{+new&`<$3jvdot33TgJ;vCxiIRQB#r!D!qboB*k~0FOA-&+g1#v%>a8z z_K!TvemQKN8JEnA{)FAB5Av79g31kS2%{MXov+YY*nX-;^zKFx>;20FO7A3wA31l- zb2bNB1Rg(k`df*tXI1)x7s`Ki&Fti}HFvOSUOjEde6;1$J2SER-5I#5o^sL*hvjTg zKvy;rev}-rr({+sm1YN%I(c!~3 zrI5ZuL=KTeS8~Zm`E!6`ru3N5_CZm2^loMuZUO9{;oGJpV3k^(h&g`?;o3Y>n1G%Z zzMMcdA)`wfP-9Bx=pIOMSOm2U)A>SlT6HTC1S)Y{n1OyqJUBT@OjY*t=IrX%CN8W* zqBw$!*lpU5NvDAVeS4Xwh+YV_$jn4=9CgKL!mlZJb00-L)hrDfgBiw;RMez>Hs+BO z%LaQ|rk@KE3ZOOfHsybWqjzXdo5@kC$>S&sM=<6NdN}PTN!x4mR}H|B<&H*I;r4>_ zE_I?d1#My{gNwF-jbT-;0D->JO7PD(LpW#!3qjDA)Ey4%wWD=u!ug>WuE`h!V7A`! 
zpCOAfJAvO5Z8}hLzuvms${k&7q3#{P{Qe6k;?)Q4~s(dNrkmD-Z0 zss8RH1%StD&C+9!m`;UR+!VA2?@9`A3*yW{M*|7977%|AJG>*icPllod72_!v)mRk znZ=aR2q;`S*AF@w8E+k~wW59ed0T z`gXDIO$OjFW3oKqbU3duWa;=emfVIubVT2PxaWT#mP^az?OkNoDhV(m3%oAAiH}fNJCZUqLX8CT`!dDg6Za3pGmVc-M10X2mX_rX1 z_47GJ%;JfC0Qe5k*CLL#M?t&e6x6c4xkG{5mjaYA-ZvtGDc?c>>1;mUtw;KY#t^QkO082h!G=3{=IWbjV0V(a z!?Oobr|*wh^iwYq7vA!3eQdnDJC9}@uiuHA{nCApMeY@)u|U{3AjYT-?MQlVIBY`hg9~?8ly~ri1v(86&=_od>=t z1~2liIEs}QCzx?)PkGcccp-86DlR_SuL~_rcAZCH*z&Qh4yAh|B(S|v3UsWi;$col zuF&^E`i;0WhR5?7P_Rt&Wo?zzdr*IjDlPonOyg%HEedSYh5;nwZ6#1?AfpadNTRkR z5>igj7yd5|9*i1238pJDMtvY1_daNGbob#ufiB8D?fstoJ+6PC$9k503 zq0&syfPtzWYHNeNFsPn7M{sD^1Km);t>)=_CZR zq_dlfAzxPuqnV!*VgWx81=oL?Li*zb6WRw_7;O{8u8K{%pC1qmV>h9(W?${m*K%vG zg<4ZjEhhp9bR4avPX_8 zW)V@IhzA1f(nJ+j0Bb*=!j!l!$3h}4b}oMeCFc4O2*-mMb6}k8<{jiCRME*F;iy67 z^>c0N5<8xwAFHwnoR@!2YkkQR+L`7^2jp600Um1tU9?oUqt~~~SsMj}z4D#P(SULk zYn;NdIXG<(3t-CP`<=-e3SB!Z&1ZT%Sr0xo#(ZQ%(PP05Ln&4VS?-cJP;|$U$n>9ppB;aKbSprf#rCVcq_Qua z%s|W{eN^T#N5aH~0>GcPp>&LO5`z=)9b#=l*3OJR*y$1~7_;M8;p3#Fl|=Xq{mG1mUJKalQC`}uhStpSycWb2e?G{davaHncB*@Q zTZ4M-7ajoG^}v6Y4lg>4GK{-qiJ>;FT}l#PY^?3z1dyEs|EV-k^q1?RoqvXB)&@OG z2ImUFv;umKh}oo{`v>*u9JiT@$<23UW>KzN@EeWEjmS28qrnaubG@V^1G8<^X_Jp)kOglInM* zf*5CuCzRQ=|2EiEFc~UNO3R^NI`g?bjl?FrzX{U#e#81K1x-RShGX2eUsj2X$^JZb zy4`GFAFj{c@T z6+@|1Dlj+f02g12T(0WX;t(4-8LnR5IB`*?y@P-EoP`xgxD#i&>F}val4d*+$Dw7# z{`QFYJaozqk+dp=p9aN27o~?E;h^+lSJCuvTR=3gdTe$SR|K zA-#VW6e}Nzh%bN%#diU~pyGZ-*R{IAR2<922k7q)yf8lO$YTZv(!eMIWK+7Xm4!4q zH=mokB~^*XRh$>|9oL&=P@N{hrT6PoP1zp$Y_qpvB*-=DM#kr*=5ZqiBu}X*lKs&RrKZd!i_vRn1K1k{CC%zSkQJO*o?w;woRM^ZI zR$lX1Dv5S$nJp_tV+!9*t~yK&D$5~yTH)3{0@nTom>o)6q~okmRnOOVu-dG z6*%|TK_+#>P0NyB)44+=M2XY*V^9(PKkftJ zHhU%%PJnlR!XOC?)H1#mL#A3Wd5PnM#?I3hbMUV|L5oX~)q*EQqOVvIk=p z<~K~FU%$`vc_`ALbz{F@0WQ^57Yh0N z#ggSI#Ky~)9~OXb35F7W3G*r(R|_{k;XS%F8BCx*Z?zWX)Ci)Ja+2Z-ux&sx*FY`z zj$x)|LBNim0EkU)x^@-fttJhAch1ukD4Y%i0^<8HAeY??j^*^C;)T;<7e$~fy5?lk zI4#S(C@_pTLC(?hoDhF44dODxjyTS*mu7fPij>zS(ia5V6Tw?rD&_~G2ttDwXdgSt z#S#D79}t2&R+^w*m!Cs)0YEmac8`|zgtc&Pi=QM(5g%%jDbX#k%6lkSWwnf;_BXqN zP8V9AMB7AM2YHGAA|(+SHA1065xC@vpN6VG5e=3I|7lh3fzp4dS!Am#GT&EN7x2M7 z7^MYn8V8gOx~Ul5wKF4a;C`X{lRUdK%6EPi)Tir*!K9}~i zappU|Q+P+T1ug17g-=)JlOPV`vT{(`b6yZ9LW=l9ZfUyGt=kHN(^a4E)qVvK(q5Uq z-vO2j^sRY^JJ)|PoT1N3ncpj*%RirCFxu)ML5M1^U{$EP#=9$w4qS504ZSrqK@P1) z1yHXyn?^m#yP|w|d5IHlL8_*?8th6-(!bhUihel>GkgWC_^YYG-Qr0lIPepnV)Y6u zy`slKW7K+7Re6?d^Nj4CoRJ9SZfbh-o)P&6hm6ax#bRiml3{WIGQE1nRxUtHlW-&$8!EZTH{&>RdUU`$5Gg&|ES&}ErM*^^o zgA!qEdh=C9)3=IO*O;PP3KEm0LaZ>|Zzv;qv2v4krn~?!Cc*U!YIvK!7sGOFr#Hp3 zXor7lNHw_kF2&(}Hd%f?LFJc*cZH(W^a;EJ%B!NN)VrfKa}~0$tn*t)10eU^DEL^^ z*>>5|^x%t}ilofmXK&EC4MVMr!XeDL!#p<))Wq#UHsqS_0Rd|*B}`G~2rrMKP?=Hf zo-H;MZ`f7efF&rUH}6uR!>r}OKY~lH$)A5gGQS)^fO3_}NJrYJVR!S?+;vB{V)KzT zJc*s~iFENf`McS{fR(OsYc{1D6*<7UnKHbpd4>6 zC{NyAc$4DaMYBJF3Jq@$j7Ey9ZK>0HSoj~cJ4VunqT02PFMX?QFjD&}1uw2%`gS}c z<(&8z0>}JPn&urk`9#wB(vs;qabth&AhZfyT*|lbvD$s7c8+!GviDthAv|QGJqVw#A0Z#_K`R#4A!kD)0E-uM^NncCyjilR6uStKaOj5kC zhq8|wl%E3ozpQca6FJ6`Ur#Fyp`RLWL0;#{1ic4%ah5!ajeu$}mJ<_p2SaCYZX8&Xj*^Y5#2c~+E@`QEQ+4&5Kkr6-4YbZI}zG;lJq12+p&@gArslf zmu4*M+bxprBG=o)m(U?U+^d(_q_EiMC#z2JxDX=;LRq#5AU9MMK4d08L7lsxqX<~@ zb|SAtLECcDp_EOhaO(M=EoPQsxVbBvHohr}b{Lxhn*S@+36g(5kZoL@IkEpl8UHJY z&SJ>$I%?>Nj?0N8@O9)#o_sT+B~}S1STakk5MAJaI0Ql+m(PPHH>e=ifI*gW3bF<@ zGmbnnwrHSxBx!>*mMbr&njPP+f*cMh1#?JSkWdHcuhlXyW`N)4#6=O3HGzFk`38t_!G>-?CH9NLft-i7=NUzf;kaj$mNSCL45*H*Q*B z=cg%*c7A^hUf`MAVOZX|4e|~WbbH1|(HPS#0=6k6)(O*ejzO_`T;HOSx*Kl8_GQB{ z;UA)B?C5p3!6*#3WkM7%jNE6bK=bjQvXV>ym4C41W}8PbVJ?B^4{HbNaUy3yAa&9} zAWlv+f_m9)F?;t$R|Z389?0@ac^tWs@%QZZg 
zSg=!v&vNw=+bjBo0dww^!t+Gww13L1ooUVNFeD>?CQ<1YSa^;Av_jeDVWWR^Cyx5? zQCEq+c(=7}5&_k{`hHW==W%RA8KsQ7+=bRc6!$?}WZ)iDo2JueZn!~0Fv{I-V#u`B zerwK8Qx*0$4!q#VtXI0wq%lH(C{k zHao%I?sY>coX7YG;##K_FiQ4*Z!+t@NGjrRU~$bG-$qu~@3=ESHv|PcR==ixYLR_B z8|VEL_&R}ak;qQs)V}f+u8>MBNMC7(!vs^=vbkH_$>j-5=JpnUHtLoPXAfD`^k#}4 zB?T^)O~;-zKUhC>CoHS|Dp6dg#E9c=5!ac&hMVXoNpVYD+gn4f}!*`6R zYsd^~<=rz!2e%l1YVz0{RD4Dz<}>ZB$7$_+j3T2K#*QryA;Pzyp8RemrxdLBk=#ul zHN~5W>Qaymvr56b#1G2Z(t00hw~zS@H{x{%{5zTsaWmNL4mAyd$XP}=@dxyW#4S(@ zX4ROS+|5ow{UpH<@zX5@*?D({>Z5hW#CJFDlr?C>#>U)#Ka}HW!F*%rlypj~r)FV= zsi_#YBV@!m$R8X&FjTyBS-dxsh-#MjJ}cEh>$__3uK=sX!hS9Z>WOXn!btlALz7h@ zah~fK_MCj<^TcX0eudPED$6*2&KI}yJja={U^*Vny=&(TwXntNIPx@QrqEO;_s`vs zEw%L9NJAliOvvAZr%E_Bj;>QA>pgT>LTK+g8G@H`+cle7%9kUX)_O+xoq9U6#V*?? ze!br$i8$~qMF*kbw+9BJPBv@}bU&N{_boqnrR27~;HZtrj$twj-&3BF*(|Bl?wOvaM%l$rU2hAvw z*|3eVen9DPw~Z*;Fo7l;t_onUUQ}A?HA|_;0>0?So3FBX%SeKR) zqWS1Ud!)eGCe=X(sS|G;FMFu0a>!iqHyevBS@U{tYG*3C zOGzDcj~%-L3ms$l{H_Z(^3_d6n6A5jMh=1Ij%>V~b9B?Jw{A{dgUe1cs*?lVp{;wa zRASAq{KkUqv`bsDxBH@#Whv1nBp_p1DA40kjI6D0u+3?`BCgP>2yK1dCtw zEl-YjudLz8-tNUaa<$u03HRL#x@0(9kPJ2}1RRNDv!FjIX0S_98P;{3VK$j9JYX-twhwGmT+Ib1b<{Zj9=y^h6`?^h|k&Y&2+^~lBA!VAZ z^n9ReUa2WF7+f^r5m{{5eZ7T#PVLK;?Wql%5dRDhL>c7aG1vFbIcu1}Q-F3(H#l@} zbjhrZ8VR)tq9w$P)7odupPO8NydnH~>yBHKTZmYF5(WFb0vdTM9TQEJQ@xT&RG{5- z_hf=z=R%*JZ$24%NMMV8dVI6$1+NLOk6yKPEaPE)ui}L8Z%%;qTUrMXfyfVJ(VB`V zZ1zDf9O8N&)U6#VG(?0;ZHXcp^-e-;ug*-Q%ivz!`*4`l?lcy5%9Y7~ZNSi1Hf&0< zK~@2n-QZIK9A)qu)x|>Sv)CS2*dq6wn$vjtYB}n7CO9368qM+_v#2#^PClaepxiqW zPT_=a_Pbao++=UIZ@zqiwI|gr<)^KTq)y^`^!X5|VCS!O*fr`tScqgWTTZ-A89qJ1 zd<$!pNgQ(j^zT(GcVZlWP&wBX6}`i%O|PuMZnf~{B|j)`cQ(iG4F_lIGV+-WQ01Y< zRle5=O~e zy+}IgqzHFa^eMd&LNpKmO^XeNWqF1*E9Q0ySriu4)F5kk27T9mfiF*B$YF)f5N~?EWkk-Yhm54b zP`&#PH}6Ow-plw%ABKw9ev$fu4nq{^miXgCjf?hmkQcG7V7cS63?-8#l8x*>v(GRr zGdfL+)o&X#E()iA_BA=!GP#Wd_b?HiD_+QYzF%gNJm0>UinDR^6swrGBMad>9ivU5 zd%&ea`d;%RSf-#o@%KdN=8R@~~`uSFbeA!HpxfNRU>w zWyx*Qion2S($AuI>&+BnjbY81TceT) zGN>MNC6v8yvqjJLVWODNsLMvkX3rPt=*sg~SAKE7|D|L&3`TyODT!_|3POwxpjUnPBgq`|UxoQBfL- zZojk?!swcpc}wi6O2X_Zb{#mPjTgWO9q%3aH5)vCr>uqOS%IIx#cF6fN*yM0U(RSa z?=W=J49MHn_6%+-*6{r6P3$RKxDc*C9F2Q@X7q&Rf`+jVU^za6^|xvTX$xxu+aAK; zA*0L9sU#4QfEK*8CSyW~>*M^O;sg7jFk8B zhjhDHl7EIaLunefCmtVWUgl1@org#nz~pOxVO|Zj=%%J*+hZdzGLCY5;so1}X8oQx zPYDP7y~km=SW0eq6t;b<)*YXg^BEXIbi^n7Ed9(KjF`RAcg6L9iV&?_1MHKS&kr^d z_YthopDLhQ()rhvA8=adK zIVZQoc{ZG3`*^elS!_|XhR@D_#vfR^Xu;mB%__U}EUQL1<}(?>EbWk|6G5Y`lZ38* zKu5-CB5Qp~C=mHpQv2a8I{yA>s$@5GknYMlzf||8)5w^dP}bB{CmKr!yTp5zl))R4 z(9q7Oly0h5Bnnd8?xgDrx6ebNUUw?F;jehs8CqK&*>kBw30fM$vWFpmB-4z|AxhN_ zSPKqd-`<1q)}y3p%0ps|tj$XFML#I)$TUWZFm17U^g%$JyFJCUftXN*@so}Wdaf$Ey1}90lmarX?`8c8c_|J2s5fdTn5(Z1j(pVeuE^7e)7Qi47#kFU_GAY`kKYQvSTD$epnH%Uuj16h z>6pRNs=l6MLh8kH3SN4eOS$nbKokv~7v3}4qZ#i3A4NmG_Y__h6MrayyC<&y%x1%S zpkYUmQofn0v3B`?!)5L|7vL3lgt{rodmV(@h@DBMeQcaIY3h#k8T)zJ>Gnz~-$;sC zoY-y%#Bd9uXL!qn_f79ap?AZNax(e-aU;uoE4ox|7eT?pT5Lz%h9L5M`}p-vU9C`0 z&AAvMPj$XM-#0)f{Pv)1*34=-d>O7se-yTRC!fm3#~V(6s>>SqINNtli4CB@hrrG6 z2^$!a0+D)xugc+38gPS@bZ;SWm^ej`H*7{A=2|utxhL;4o5zXWMR7^=S>mP%Vnw7co5@7jlm?lv2gY`Bv6yFUAq$6utc$T{ z>30lknWuXisf6N_DAaDc`&iREZ(GdCFjKcStY`QPx)-9pb z1b4fC@aD2jR-oY1AoqTXQ6IZmD?{N=*gzwgXGKWfEW#LQ;&q(FpyY2BM#aL;mUUHz zh@M-(;`9@DA}PROwfwvuufYr&q&x1MpuFA*z~a;9X5LvNg-34r#!OE^T$;np!&%Cn zUP~FBZQ--qh}FlzkV#LTMN)S}6ijgOXr@AcW@uQVq%8Lz)17=`wJc+m<(9h0ddd)L zufU&9@*_&0R)S*U)>sb+dGu8Y%;;PtGylAR_v(n!s&oBlQaK$1WL5VxdvSBGml|9# z>Y}unDzFU_cB%B}w>C8Bt+7;!f0yms6sOKt6eSl4Mq$<34_0yis6KBzA5KON-t*Xh zJMWXJd%5ioZm+e@j(MOz8l7OVx9RklPBE`~K3RjV&xi1)2{(m4QUKjeEGN&v$T+G> 
zsxt9j?>lsVU>HN#B%5xvhh*n-HsR=8*V^FkkzR|6$S0JSjmU!6#w@k>^3I?!A=7VP zyI#V}yRshME!p!&1V3wm>7|Z0kyDj_^n`+xkIE#daDRw2ro*?7p2F+X*pC3+Jr~}h zn4zY_LEwgLHk;NmNYTVT^fP{Dc))F1gwmp3_){wqXCCgab_ffoPxPD^9=!smY?n(b z>8q*1rb4C>eWUC2?5L0z=592FlKC9hg|2g9&Bb8OyOpR5r&<(UKD4Fcat7#skfbeP zUu*&%ECD*==o@+aWR^83Dv?tU1agMA&&znEw<&l^J>;e)WZ!sXK7D@^GZHG7vwh1Z z4!z?UsC=Ezo$1{ir&E=_Uf;}qqvlY_J%!B`UpW*_5(~TCqAYexgZ1p^(Joo`JgYKp zq$0P=JC4>5a=-OC^oA^4-O4e4R(aex>?HT04HOiLpvNh}%j>`$Tw&mv=I_vPYY%DX zqLR5Y5pTup1eG}X@X1%TyhkLWx}ZtfPrIa#@O|7AV&m`p?3Pc%a1ti{KmtufxApwy#k_EWh>GKGzw9XqGnE7*!oODqHav^f5dYG{vdNIVXBinjKMXfg~h^D;sq z$;WAAKia;_q@sL;J_hG29#m!pdTwD)*v8M7b+GT-fUA~9?wT-B_~Xo<#C(VE*u3u6 z3(guwY6wUR!7MzFh}XJ*@PuRS#Qw6gl>1Zqi$SByOQug4{o6ON_#P zGb;5KQv|Y+K_+V?M1O*AhfeWi`gbQ}=G%HI@w2ppq`YC(A`7q?h}GbwM|oP6bmQrW zEU3f=Q~3YV^z0BDZQ$;W)9Q+3&=bX>TIU5tMe#+1L?7hD*<0>a1f$VNt zYS3EC9ftP@p^e6WC80n?b3-0nUk~`14z1J;;pe#n1?3lp^V20!Y&~gs*PN2rzrUkN z-4x(av(dP9T)ic5Q;Y1`c#vLwWD@DS5r)(i@x0a{VP2Jb(e!Lpi-Fb6hz4(Zt2i8_ z$>opqTD>&FqLfxhOlkUZxEWJ!KpbjY zt~2=BXYNve;uJ6yecg;bpElufF$*VIbicrr(@6tf5%gl-vuD~+1C1C$9i6WdoHE{t zV5xJeiX%d&&P_*Bt#|$F6xhbUgm-NF8KcXVeA-J81dvrvKVh zVxP$1$r#V(km|<6Y=vs=mSl#px!v8@=OrQv=*Iki9ZH6yV@BdXy0WYnE*3vvO7){IWVWB!0j|d_NtRgAB z_CJ)0c7k2+A=XPC_04hW@iZ z=?@{KE%T@JNktSXe`S9l#{ZzaDxv){zOZM1nDOgx8UGiO)nQm`H$nx9KPoHLuVpJrT7^4j_A-9dk_-2j0v7;gHuLqdUd)~R{y#STV~p;P z`dl?SDpr=pf|hlFi^zz5$|WoU|5*$F0af%0oVrZMM+g1P>KYmoR?~x5*08yW>uBXF%lH{Y3$(%H!JG`@)Z)7 z&t3?&p~njK__xIO!urA(wxzOt0KF=I7nF8^zNUotxRFZM&yn8bTGmjKVIOk7U*9jJ zm%}t_Q?|oPaW$z%q72pCZ|o{j^h+x@cD%;(5%D*i4+eS=ANO(<`fy@0zl|N&sO>{X z3?njJ%RCc2Tun?yjH=k@4(+!OE?I3YpJ8|w;Gd4)RxkRlU5m$5%8bptG$KXk?M*>d&TJJ7n=#AUxU@M0Q6*C>pg{OzmhJc~r!3V9~iv*Tt zu2RU&lLj^e^vVjb1p z(qi3)EB(rA^G0jZ?4z-;N~EJ*NDFEthq*0HF$=8&63I9esVZrsxATIkN9TBS__O%! zY3>FWdwZ7C6^|3_f#MYX8xi}uz0Y*azNHH(B8;S~7;MR@p5`HcZa!foiTcowm5lW< z_(-395WKuV9q>R7Kq5QrR;L|G5+Z%Guk)J=QN5lL{O+TVc~PRl==)&4zfY<0RJ?5l z2w@}J53CB;Sb4B2kg0CChN_dV^1VT%j0|>K-vNQ-6o9g$?#3vMXEnwP?S(9 z!jTXR|J?}ke&!H=)UJJ=j`_+)FL=DRL2ZjfBXSS1GbkqHdBut5Q39`w>$VOO7O7qW za7c7axG_4WA~G7r6E+Xbkd^Qh6>*WR(Pf~yOHAw8*mJ%`Ls4VHjn0CcBe22NIjNVD z6TgwPo;Iu!9)5{P6L14N@r7?-36h1b9-9s(Jibg6e7!MjVXCoLov*teICzzlI zf-RaJPa1iD7HaD}S~GSX%WLj89=MH4O>d{yQY8}eBLB#Ce+%YzSXvLG zx#`oxG!=7f6Kkrx*)V^_8&caX@HC0l$C>&rH1n0U0gBi0`gWhz%%&B2eKYMvGmjo1 zBc@Z$TPno_O-a-aXILrXSZD($JSQ1b4H|_ocx7~dKJObIV>F`glOC?%gL4Bgm)Cb8 zFkz|YYVPO8N-ig2bsxw&$-KUA$G3KB55gHL)`Z*v&+1eT6~$N3PA=)btK(v%Q7@GK z<+ju5sx^9T7@y%4ofI4zSL@n8lT}juXVQntlh#tY-b}t$wC|lKof0WQ*qK$l%E%P# zGqU|2zPh_z6K&*Je{EA9&&hFAQOPt=1`5bs7Visw@ zDC`sQZB`{)w){r`El_KLy5lRPXi*Wq!PYfjH?2B7e~vumnCBTVZYu7ox26VjB1Uj% zawy{E2K=|g+QR)v?_I;XD(D@-nR-en*fdikp!HY0vbO1c>^3Zfl#S)Ld~7tCZ^Gm> zmL-Y~(lmf&Rvc~@4`!=VfyJdZDz-b-Ag?6&dxjXfa#gVndZ|k=N_!Hjem2u$;Ixjc z{)Fgpf3n_;)wX5y;u*{OiUrw&OKJzmXf{M8cm{sE%Wl04@*8h{AzS=RLvP9dcBWDV z_wC875PCiO8T7Crs^pVHQ$j?qOXg$ynq5oY0_bhvp3Af;WF<|~IxL!pOJU6td51%# z6?y7XK7pU%PVFY=E`Ixv3F@Qdm+6-73;dWce|)tjI6(`Ba2>b2UX?i^)Lj-ELw4hA zWRH|{ASzDy;&|g$qi(}46P3Q&Aa3eYGxZk975>}IO=4uU8#(Nf>dCuZ`(Wh+uvcmAjYxAlj4dZmB$l7z zfBK13aE9s6tG{zApE}#7<_OOFkvHEdb7%MMyuVcVT|`gEi0P5Ze7_I*f5+zjhK`5j%$NZvy*(x`=@6dVl#2y^Xkw-? 
z)7v=QvA*VS?{oE<%e!FTx_GymMdOCzgX_*6HPPB%T{eb$Dn_4~&Rh1J4@z3FcqPH< zsjjEn>bpRv++O^+iX|?Cm`BY!lliG%H{RPR%u_>w8;(!O zX)v%2rF@8xI4a{(eOTTo>kC~gHSC9r!q6j%&Gh87^u678tWAb=e1OMuLKqa@By&rY zkUGt63ObO!jvoPenZN08vAJe&e|LE>l2Lk6;vihgV!W;F=HA^-C(i)a%j8%{H8Bc> z*Q?A}o&1ikg9-eJX)sOUho6N}s;b1=(r-qIqEKVze*+8z<=f^u zS$Pv|QXNt<>Do^su{RboAp-;EJ5z6O5GiiXvgRb?Dl$zk4lfWeA# z^m?+%(WPmLU_N?4Jz=8bBZGBMPMR ze3&b`?Tshd{{QrgDRa9Oly zS%&g+HZGAn#(^I~e=U3~Zy+f#QnQx6jk?;ohn!F{?In_2-a-{^<5{IP=dT9`)44%0 zuQA#K&?Iomy@E0}zlelzFO9}N!G*YJuFcNQyk~m2haO6`_5FRUo97)p){GByY6~gJ zfJ%WX9=>xrJm5v2pYa=qaiJ><;*;@4j~e>gZ3Tr0yYqv}MRb{m$a4 zrM|vVcLN(UrKsYLTGl)$9U0p^9VvHYvpCH^AHDDOWcI~pG{uw7WfVW>J-ly{U)RBi z%g5?{Ms>^}f044v(eUJc{H9-#VOtNr?gQ;Ct(WvXcuhct%q`HnO?nhv2<&#s>HFcR zF^ndP;=iBG%25h`SD}PO1c?`=J|&Zq-6QojQ;8PBL=~&y%VALWl7_?leh%(Q2rhgT zg_XhUvsguoU=sbBuET=d4^04)fIXrg36nF(lE^t-e~Xi@I3}(jclk3t30|oar6<^@M{${-?%kQae<4S_4yTQ^0TM=;17y3H&xD_&+ccI)= z4k*4HUNT}uaW&bL^7eFt7^eh<&kWsha*{ugy_GXgghiSQgtha-fVY*K_jkv+5_@!R zdgtDie_EF)nR~s$7E3~yJ;?=P z(1b|Ux&fxv*NI6>ij*4W)o{_(?bE)qCUy|U_&nZLL@~!_wjX$s;XC|r-q5yQDknlE zf6Fm^CR9e(=GePXxNclbSFBE=XG7>~qF?iATGPgQy2Ya)i?5A1Bgz@+I2X%?k_%Og z2;5IQE9$zP@Jj+wu21WN%VHI;Td_mzw6;HWf3R75Lqsv~dq)lyi*x#Q-SIY4Ni7iI#f&+~e_YZm zN+*TL&Y_bv*K0?XHK}g?gy*Ng&~5+d)#Hu|AHuJVceDO_D9XOTD)!ZedziL_PGh!F zf;`EV)c2Gbx#3IIKsU%o^JqL~e;NwG^Oud+ zVQa9jb_;3q%;pk^Aor{89eV0v!a4m>yV^KgzCQ6Vph=B~p#aGM%5?FY_Cg-XCtA|2 z_lJm30$&I8?kE2A(BNyoVbPYyYojS`dWi)|KAW7{Y(ehmyrdWSB59uWkp9XNNq?aP zL47j=YOrz?=tF#yib5VKf0MjS(bSK1qOX@&J5`bkE3Gp!XAYyYl^yzx^obF_539)L zt11_$psA3MZKF%DiLPN|-IS}`cv=-tS>2YgX5Ds(Jhl>nt$e~pza4j^b!BF6k*&4I zAt)os!;c~?yknAQYvFey#k>+d>Mfef^wpjxG}p8HzGyGO@*K)?tX6eNrOlh!`*6uM{MiG#xf$(#X;gla87S znv+2(ed^Z+&%*^_e;7xv-^imbbA=9?&E5^r*qU{0&sH?uD(W&PyfVb8@_6}ZFb28N zP6`yLv_8f+!vGG%jt|?&V-EDTS1$dKuZkS|-<-$4@T+zh6S!iJ;>C%!^CalM}9 zZOrcb(%g;iYukQym@1TY!_rV>g&lo!h-5#})JN@Gd+UtKe^-|$0j_TO+`Am5;<^59 zFl&-m*23i7`*RzB-$!T4j1Ye-wY;RWCnVErD4JnK(}pH!Aq!kLXqCoS{L}@agQwFQ zO%z_5>2F-os2-gOX`}PZ4(BNhXqD-ZFs1E!LDdoa5G}3ZhltXv!nNx5A9C&=Kyo@- zVgr$|%f=uN24!kw!hjmvCsJ=y$^D7!PVW*v&sa7ze?t{Kq{gnNXWp9KeoOe{dsp*z z!BdlFmo(V)n0Xp8R4|ng?t=`c6`9NRZpK7R z4);n;!cXihStA-WOkko^5R((*JBbcccMXWKzTUIs`=Ab+y0+POQAi^EP!X-gqIFdH z+oDsrf3CivLBe=zLljPiB-9lryh!Tj!NonggW`L=qo*&eXV30k3SLoi#1A2m{jwb3^Ab8pYd4 z(+_1XZ+@f>E{PHCpC`Znu>e^Z*>PQ^d+?xonvrW+c9ERQMC)EAEc z_;9@so%R;;UIk$yzpq_LzH;u*3N|&eX->Ln*Gv-@mMC=}U+Qt;ouP?-yV5I^wTN~v zYs(y8HYlF8E}J`^#i_;j?D2DYF7fvU5p7C0tGshhMVrz? 
zf6A$&x>za{sJCN-)c)PhErQPW9W9{8p_D0(;f= z-M0;b;2MyYT!)(wS)9ZKn4p~A)O#;ff4)+NQgEK{gx>NuEw;=HDQqB7Jtq#7LP*AX zo2P_r`7Ff^0S87WBCi-fJjF#RxR;^GJ})vBhiz`|Pj#5&N#Cu&v#oAC9_^*#GG?ax zk^JxYvTn{(muPhF%3AUGEgXW|l~sAOh3Vws#&&OL8isTgRiPdA)gPhiLmVFEe>hh( zglmz??7Ri}7?>hUpe;WcI$w9{^~k835)%RqE<_gk%6v>Ra4^fK_wu$R!CzjX+8)j2 zbO-!!i{3m1*JcqqKB;I_nFPW7v%Co#z9OWWOSrJ=f9VxZ+K068tZS`pyP(GT$ob0)9g$xMZgay{>|~5@ z#dwKWe5kN=C-FL{@ZQ%5k5e(2$aZ2>+Ja9zPENxSQ}0Q*NK4g8w>zD1-9(@Z2dkz- zEC#PbD;MN4Dx~2?1QSY3OTF4P`9>zN{H;&UOi_wyMA%G>y<$8YK^Db?f4-NtXRf}{ zsJLAdD7};39Vd;y{n{0Zm-IQRX}@TcqL)@92*lv%d<*WJ%*6=@S>2J=!_g)HyQ!y} z5h2CEz&%z-$7tDkv`G7?O+u7Qx?#_E7T0kslpYh+!wmj};70nU^;>+9MQ zyAMK{4+a5)Um>qNV&)aie<73ZB}%+_6@Iu*aLCW=a`ao1~^U4vF?}UQh^>eBUvuasqLtFbrF7+_?LVB|iK7 zg`X4(gOu@-xPjl2F6nsP*c?Y87xfmzeQOoi)f^mG=k(rTHGhT=e@jQ~c6;cLZs>*I zGW~8<`h`TqCV!DLQKdqo)|ItMibv@tI?fC+aRh_fEyXSJ6NzZ(Z_^P_-gb7V6?^B5 z_NJZlx*5ytTbsvtsmV-lWsr4lo+TqTw%F^QaH178mr=q5UPy=j7Inn>xn_An8WMD% zR>vz{*hNm$1rsQBe`RV8jazE9k;$>AKnJc_=CVJ4I75A2Yj{TNRV7$IcPyWfmPhYL z2`ZIPS^6VMO&@m-rzOLki0x4whPu43%2c$lOnMf{v#QLiR@W;}QE@!a!Y|}Vkz|8t zFG`|S5ltHv#vre8wjtFD07^m9S<`Gro9O}19?@l(H8Avve_hRYL=pshoST6UcoJW* z6+zM3#X}js1GIhd3H42oLhv9ewa8GdQ`AnR6b+mczie|;M&g@fZB$16blcp>%Y=*S z%YFLq&?zB$PFr%vvKHwcBRgu*f{i;dd6j62PWSQ$LDh0(`QtV8{m{e<>9pCF;DJcxof*nLzaty4s7RCz$ZH*Q*($&#`>fu z2V}|98ybY{dCxPjQ+WyY&R_Zcvu?&Kw*5|&eMY6M8O=Rd;+vwcTjyHDw@KNJMaRj9 z;hr+`X-yRmekRO&VhTn4nyDbBib$_yekvHSe}n|Wv!u@xy&y^Eb}b+uJx#r9h8bTn zqPYQkuDB*cq**S~tFY?*Cbj-n38JO?&rCnsl0v<7_pk$hV`O=upN+A~7Z$qkR8YZ@ z2p7#j-9|EjbicA~-46|(nrX8wCZG_=AgsdY5Bd|PFD(<__Ui- zf4D!0aA#^v)4968_X(UlJ_NOm2xXi(VgDMpZLlue@vF~^Wsm;&U41<^@>?b7Du;&1 zufHq144an#;aX#{6ff;^6-^fa7ojorNoWMz?Q_B8mP9AwQ_9A!*uhHase7fVeU5jc z#qktep!9&7;_TB{!s49Tgi0fvNCTZde|u{C;jkKwDxXI6aN#E2#C!jIj%G<-Ubb(x z46pNw-Ys@qgm41peCJSCu_eBdZO(j;4>@b_;~V4~@$?j;z7JA-ruwrW#SjXhso<$x zkv^DtL(%ITdkkeRf(;5kFSwp+9CYm`tpxi6p(E}=$bIPag0Tb{(C7EWZ4+(Lf8pEj z_>xc6LVh(`wxQR}GOBnl*kj;|m>y;*$lnurhnJrBDU!5v_l?Bx+3HwVcu{--py65eM=;;rCy%v(Jn$|Ip(VERarjH z5-zPi=x;%5c=+oLxiStmuVO-^fBQ4=?LJsjZFz-$`RZI>M0Rne8X29Fl=)V7GAV7) zhuY(8Ea+zp66o|kQ&RO zuz$Gh`I2hZ#G~)R2W-q)u5>ri+u?#Zlu7T%SVohm@*osx9GB>oyqYG;E4JP%sqg=s>jmF z`Tto&_}|pu%l~incWwif%x<>vVK_uE>R4eeAZ>O-v(c=&=dQ9irJE-bafNOYCuH&5 zZc~h?@IB?(jV%kF9Q>O%f7=G&1@|=;P?lx+fII)FO8@VN1w-&2^O&twf0yq4M-AGV zf2sw47W;Ys7N*ua!0KH06S!YyAlAAFc%IdNDgZAED5uf_aB}yLkpAw0dD=M;POkqE z-D5N0Y8wX(&NaX9hYVt1p+K0q`~(<4H5jgq9xweBE0N*@-6=ZMbn|72K7^dr? zHD$VjFPO4m0%z9Q27rKdZI8Ni6SAoMd~~kC)*~nbK~2G!cbgHHDncXLMcUY_ipX2V zdZjEN`rfQlLK5E`UJzDmy`qp%@(FZVzfW!g2(LCk4qp?42{NzZDl<%%q7Bi^wnc8vr_6=zCHxbw;Y<|(#7S+Q{I(^c)~0d#1$HQvN_u!rR-2}pJ7H$tPM5I zI#0rgby{-IfB7*!G3t@i`QF0F&OL1AV2sjORG>(NN@aSO%b|O!b2@3v8Z(c2k0daT zED_c&yRL319@}P=caH>KVy?$&=1H~hWuY!s1G3z<@xu(I1&A0|X)BY`Wh2HhBMVAq zif6V;F%^hZ9F`h=#MxIbw-SR|2{PNu!Qu9hYt~qte_DZ#tPi|o(P^eDq{o;JEzs1~BJ zVdsrJSUpf;P&2r(9^R9Br8x%p$W~`9dxKGLEI6}=zOHUgCsshqg%x*H*Tc73&T>^>p2`+WvB#0Z z@%)vWx2lKj(RJ=4+-%f93fnS#Pzap6`aN4Ycp|)3eP1@06`ifslc>)d0-H}nx~}jI z4mPfKL_%blXiSsB2&pKI^~?I{fyOsZPqk{;O644p4)#U!$h3XJrjfkz-%FmNrMmyy7 ze{*Cpcc9I0Io?-+vcATMr!g-^&C%kI&x#rEhC$VQ;q<**t%|2{`lQ_FZMV5}D__Z= zmPDo6>$?&jKn6$YOTQQrCw>iqB2gbwJ%*_ar~%y9M~S{veE0p*`Uqmx((8&P)E+uA z?{JcO=80oW$(i8(8W`5OAB_r>S$mH@e=|*PbcVQXuEZ%d@!cyDbx4g&4^zcn6RS{~ zMe~r*nEXS8)tyk9n9CA%i1ivEF+(lxv_FiF45^4OZup#!j<1#IbuzhH-4y`-CYNP? 
zMumgp4Yp6Q!n|lcJS5R19xturt+sv@QJBjJkmq)9WUZ2OEtj_s2ZXla@0Zoae~Ix4 z?p+}LZ~djP;Bazkzi0guM;FKujp(Z;CPzuS7qKftNx8|V@r{?Oy~=QPRwHbHkNEuv zYwOb-_nPV_N7dGcwX?A@b#)HnsbTzJz-#(ci)fv)eLQnXkTJZt>{H&a$Q$vof2E=~ z#r{ZGAq;wjT2lH8y-(Rct1BPbf34((D+2XdfBmY(gbfBoqnn^qi@ zK*7AD;q)R7=Q|L$A|NRFK@Zn?k*LFl1WFKg1+jTkTpN|FE+T@^fJKqsDt*dXX-Qe< zVdem%B-E$lB8Lti&5)B`XL*Y_=G$&fvnzs0WNE3#GOC6LY5?&WRQ`H%9+W)lDAQ-C z%}K$$ZPx$Vy<`>CQv~{tf0*j>?hr`{$S({%G*#BX+=Z`#HX5&bBnmGHWhVPd_dw4d ztjYHM&?fF22LHhMNkO6Yac zuh*=nZ_7K_ri(P=e{PMbh1Iymh%RWb!jVR0M6LNrdX(>~$(#&s|Ja(-y(Q*5=L5|> zDvFwtaB&<))C^grMw@8|X7!NwLSP^e@Y+1r&U>^8lJd*8Szz8@p%AByW*!=9-WNqA z*G1Vi1kIqT{8VzkWa^M0`hsjLMm@7!pzw^a57P&fMn>H4cIx%^EZ*oSDJ>zBp^X8@40~f_cU+p}PILtIwfA7pfc;AYgv9dNt(C&Xc{WZSF zBrl5yo!i7p^VgfRA1mYiMekbO?w18}a8ql2ReyJREeIV&{G?@~xBAu?%J(#|F;`nd zF{CtnXtcRA2`)K&LA?7h7T=%#m5y|0a4#U-|M*de;SiDqu z{$=Wyf6}_!G@3%!B8#SfP_P9lsT1REKzJf(J<|8vptU#g@)Hj>nvdc5BikZ51zSbx zbH$9572EA-_1fMfk`0`U9vaJIN{hpzL7a5+{dUW1KkoYNOcbPwn zPNnT-`9EEo0;}7N^Sw+IYKjk2ZA|eA8BaAFfBLvd))sV^u7>aUXd>SKni3l(nkd3= zfT=X$vQv;iI?BhzMpIViiMNx`LNceFU&`;MY`)5Y%zJdn4m^AhJ8b%UR!l!6BRbQI zfVKT^e!e6tE6AtgDl)flwm=A^vKbs3ABM5TKl+lphP_HD=eVsv7&&J%B?BhtMoDAO ze~+Y~Ib5BzrD0SRJp$ej^?_X|FS9k%NaSCD=9qD)bKmE)(f4PLg!ydjQza zpZ2!+58~~IaQi87{sYTnjyObzIRC&un4I;eSFW6k^iyz?x<3>EJo^Vcmr6f`e;fA) z(FOn@&0nFK|3G~|L?O6;=#-zX+6jPu@el9j5AW*tFxo$r?f@?L8)W=1&F^5i<{+S% zK*&MzL+r`aPZCXSUo?_nO5!jxuebY4==9roVobT?N$GsVc;ZKI^2m5%7zXIc6gYTk|XrWR?TraoUJzx`@I6L}Wk>x%SC2wax-tfBtH9%(C{( z^0&k4_HHIU^%X9l@XqORmmDZpt#GwY0owrmh_Fn{b0EHTc$TM>@hzT_SI*Wwj;VD9 z@S;$EM9%@3>e;COg0?q)n+Tyv))i?ueSpqOQ z!+RaDj{ae7*=Nt1<{6t~e?iQGnirT8H<`Nk7_jKVNkzOPw_IHZyrZWKkO$yiU1H0s zKIG!HXQ*1%Zz*>vH$@0JICKY z<-y$s!^y5dn*qC($fla9f1aG)?u`78zx-pTPJij0e*dxQpZC}Te@*_g`_HzL(D?oz z8@77;$It%pqd)+;_;1^De|!-&*nj&__oq$$x7{712KU#WK-?*R{3U+$fBDLv_Z(5~ zxRdL@`>k_PaO{88`P2SS|D3=5sdG{BJknK&Q zmtmiodG4ui;nH~l=?6QJA?5;uh=5*Xfh*j5AOQoYb#IzT|FMNjiqKP+J;PUB#ppRb z*Xo+JpZ*eV>S1g-`iT*;cN{8%&x9BM>9N!f)mj}&GQ~} zI!eHd<{jJMtP6$}2E}J=rsfra^LK@*L%G?1^>YcxrrXjKehO$Z&{x}{zNDmT_^kVTLHupbIaXd|L~c{_w`4{^nYUE&yQ}S)5CxJ?62P6 z7$rD%s6X<*;uC7He|_z5>|)ONPn=W!b8h_NY53RQf5d(fI11ISZ&vTt$DsDeW&U`AK zuL5U|c+mH-k2xJCaXW%*%%i2Z)Y}$9YTP$SH$<;AG~b1`S}$249Vk zM@5$x_#h{e#%I7I5c&x?z)LW08}Tn*#XntKe_00XR#0UmO!7JZ;+6O3f9L)cEB?f& z=bu<$|A{Y5YN|%p%LnByR&Z!IT^P>_-HcoQ2z_6#@y4U{V&!2Yb$%C^Om@{^3 z-mgF*GaO524#%io)BD1$Z;b#DIFnWR+HIlsB^LM`Z~&@%muk2RC)U>^f?fnCZnCzn z%gwANDt9|MHo{yn>MSfH2Z?zrQda|=n!W;_sz{3fMT>jM0|5YQ42&xMOT3x{oQXAx`zMW z`+3gkI1zV6tPrTAs39wdUna%td5Y%n)|)@JQCm-Hv^ws`Zd$TDIWd_o^(<}Nd4dcm zFlA_F%Yl31)Cxnnpn*Ktb+I$$f51XAvIv(8%0*_N#AiG;DXJZvCucW=F#<*`QS@{W zpC(}kB$>k+-KAVoJ+4MhtqrhXm_B8M)Hb%@VhBo#Y8sgh&r!{KffNYp^&={_2MVd_ zT(2A8iGyH*aZq9qhEKC9!n8Xwczq^3sv^XgnUyKeSd3s`W3FtkYQZvQf9rDMILHl2 zoy-hLEeGEt8@W2vF2ey|>LNZ9E^0Q)}5kH=HecfeKr(tOE* zw@rKcp8qd9UzMh+_I$wOs8urCY&D;J)Z#&&YViu5qhX|-;G{t^Ks|(Kg)HAxk z$YdG?at@MW16+zxiAX+OZjTkhAs>A%5|akn!q(gg!(?)nytL-Yf5koo*lH#w%F{du zQiDp(AoUe{8HD zIw#HwX*t)+qY+d$8wn0q84ytGzX+hLOccnq>I+7tK!SzV2|igUyFL~h$PxmXk|{}O z!@!s|7dGbu!+s)nes;gh`%5rR-ta?-~TEe-W8=onuOkzSlR9Cgl)dQcz) z)f_>Qs7NrGbvE!|_{lK=ivf0GFPo<>9N3L2S@hCfzw{UXf6+ayKbHL1x?i$|zsr8; zl*~JS+2CKcG|ST*i6qk1n#;a<^!2Rg_gXkvPk#7qi@$j>YGvs!I|%0d>)b$ZS@+B) zE=UoZz4EcvTo)Gw1(B{|cE>Z}G7v7tSd$#Q5#wC>mI$FFCPH(KAW4keb^~&ch)9Z&-7jQ+j zzPSY$xGr=$pCP#NEPwFd7GZ-+Vp7s{Fh=IGxS4(l2jNm+(L&g~h zJ~J`PXc$d+b7l zTK6eq2r8ch_x68c+iVlhmJ6i!Q;$A$T5Q zYx7w!IMmFT=hV-t!wN+Nad~HmE$upTDMU{re=7+VRnY+86$p4m ze-;Mc%#IbuM6(DCLKaSxzh>&W@Q|c_dGs=zT`1y(!cOcI6$AwTMBFt%FxQrYxmiN0~>=^J@D zF;7&W8a{R4h=^pgDhB5kE0ri!89*B)UyKNLfL(xVU-U1}l_n58*$w7j{_U48QkdWX 
zyWc)ii>b7B(n|)tY;Pi&4_Oqoe`EaoC5L3I$G-HdI6~=Vh9D~q1d)MieZa+p zp?P}uX%!-_g-8sR-l?2JZB4VTJHfR#10n7v89v=!xg`AoK{&b-RCR2lf8OusE`|j< z`=N&jBzr2{5su zyer#5uU!m6t{7qwJ?J@+u7_zfL6U_wQscr6TC+3b5z*Oj=W`7Q0XJEZESwN#x{Oph z2nMc=>}-%n^YZ9s)xi~Pf5mcbo8;w!Hfcn-JKX6()N#ZEcrr|k1nIfd=U-@76jkSIIU-{byGT%Oq_U4Cg+v6{|N~?SRSi>zJeb*7OCOLoc#v5P$ z<+HWdTiNleOnPlu>$mm4_($oamVZTZX1TIqzU-ysg>nCZAEsZne~$SV`?|+u@~yAj zXM$HUU4Uerc|6VMhlL3ua-Hyn=@A6fYh*<8s&S%)AM$iXxocaYyD@5TDRLJWw52UH zOaqSedWdx4GP6lTjO5T0=@RA4Acy1%f)XUt-dQjuk7rf}2*Yu+#z?xH*-#6PAfZ?= z7aDJ4@@R{mXynV1f1rk5DU@9-lP(u!pi2>$I!G+4r;kd{5gZeo5?mKc0tK>pkZiOI zvT-pd4gGBGnmXN?Je|4uP;3^dWuk~8kPZdS#kEMWkc`fY5V%WGpdCFCAu}ivL-__F zqXsC9cha*3MN|qBdalCskSVx<4%eAOm3~fZ+@IG|jxf0-fnNL0B?2A^h73?SPk zL?jf~=_Z1T%$~{9Lw7*Jt%#%!leFDWk$iSO5mZXwG{C6ZLD0Sru5{N=D){pIe&*L? z;@j`~;?4eVEaz9e!~DBVobKSz`iUQCp7wuZf~_2EZ7w%Qx%-kyFTJx=7*K0>z1G3f z!^v5-17!Mje+|9(G&R0`N$_iXPD{?BQBX647Z5mw@F2P@&ARJ2JPmZ2ieygDER)RF zlNCNpbsZ;L+)asIz|uRUCsbE4aCRjslQc^$lSezFEC+_*LDhA)h;!w7VGt2vU7DFW zUz!?NR_GG5Nz8DbbetyY&>*!C?h}wFdp;L4*grLde-gt9QWwMuSy`b$rCqID`$*=+ zk^=YAAQ`(*E^sDBm?t{McZ+g~P%yAiARUD>3o)@ ze^#Vj=)AW8f~uP8%a2K&63FZI;;~vF&XCEwhMOIC^G2j*!%WB~HpvDWt!ZS)HhSFp zRS)hgK`#K^+$Z5w$%p7AAAaUv&ASfC0?98M@;xs(d)cP*7CY3upR&VWWWhh{Knbm% z)B0?}nNUzZL%doSE0=(4fMm)>;YP5Xf5*e&9SavT!6*)utf(AfO`e%CS?l6{(7CG< ztiAnZ$Tt2*{OCcb>5u_OqN+jARG{i&ih+q|**(Zz9fXQ0l@ugr1i@fG9WQki zI~7R~cInb8#5O(*X7mA*5=YjI7cR+I=F~}U$aHCD64Rr0+i>F!k{Jl;L5tTKf28vP z?or{jrbL>u7o3nr@Uj6GgX+(3|4-5WJs#NNuPuJ}iXFaV$Zr1k zIN>{X^R3grah6wG-u9-oC#T87kj(I#($&%B;Ps9h90cw{QfIN;Reb9wW|$;2y8CgL z4O~8P)fX0Kizvao^cmJH5S0&de-P?*#FW2Do|=b4eQ6)k0RfI7RQfzI_4zc~U{_#1 zdCYi85K68p_9L6Y#)8DPqGW-1B0Md2UA^>yd*K{`xi&u@iw0fi$aM*^F1z7i;yhD= z8@Mg{nQQO@1E)3>LWw+w@WjR-bdaJ3d3U{N^_ZgwffyJd%H68l9fv_`e?eZIu~hS4 zDZSA8aq(wogCNCEz#dqo!mZ|q-o;XW94OpTCMwqEqu_-D{4I9!_qdw+{aPQ3Z{OfA zHdpI6D4qGOQ{VR57iQYhyI=UhpYh`s4^_`i)N|i>Z1U|-dQH2Ym+lv)0QQJ-hEeT? z3L{Yd0hUtV9r!?(RsYz{e^whRHnvQ4mJJ+&qta>00abR3U>gvH!jnFW!+>L~JyC;t z2b;#8oPN%D>NwIh5x59t8V1fwH`E6lL_(!@^cLr6X@~{LXgdq&(hiC_NTylyD-TI} zr@1f@a5HGQ^8qeSVuA4C9KT4-;sh-G@omlRSSh@^*SFDvTgm%!8>ld z_>6G$7d)$#sbBKhz!Dy(SWE z%e6Vd>L>!NTH2Si&?(Y4}$X|z~@*Y$LF&MsC{e+^7=Z4p<{y1;@Ij4hJlz<* zq1};)Fp(N#!gW}$kO`N_$zb1&E)JT^ zi4HlyS8+PL%c8aV z#VA=*ygS}|Y~*o`;vii*F^GAf8DuVeN;J<7%aRJE%O-&e0wj4Aa8V*BtRk#i;o<;C zi>Dh3B|^MlIE*S?$h+OhqdG?r`Fc?9DCK>ee+?2Mg^GpmC}=YjbvG6iCU8m(eMIa! zaZtlTJ>z$R!~)jULHRE3vG#O931qeJ?uS##nSq%_Gmj$!%lR zr1X(TzOm2O*#0%==jG4;iX|s!t4^a|^YIs)`d`H3e)5vuyb-SfpQ5*>N zO(*Ui?D?Rhd~4lSLRVE=)Ua*Q?}Xb2O@br2Zw#MVRBArv@9+_JkWZ!Sjb$UV%|8_wj=8A~rz zIP@A6Zo^B3+rXT+{y7mmSNix@K2Y<89skNZ{u|bd9A=Il*~LKk6+B8mdO0#VLQG%!)Hav5;03(@t|$1#G$0$zFH zVbG5bMdPX51D;S!24lO3KJBJFhB4g{Q8)||m0y|=F~K0%r}a$N1XxMeGw3QD+0FiM`MO-kJgabcLkz3R2}TRaVqJ7dYQnwOk|QE8>6=7rYEPMhG?04qS$ zzaCfe@;le%doF&jt#{d9@wYeT^`|fYl}Gt2Zv6g#+lk6Aea9kHU4Q0(A@2Ofhc~6! 
zR1KCjgrNA6GL{#)JJ+|-{J08aiYZJCGN-i@R*wZwG4Nr!YO37J%haRE4g)g~r^Mj= z8RT-SXOpXQUP)I>)STxg+yfM_Cq)$R(@dB^5k(xK=kR3J(sA-dqN6nWGj=@CFrt~A zRSxWhtIH@@YK7YxRexc|gXQJ9?oJ^oq~6B5P++IMZ4fM_rE+J&1@GF6DYJ^Cq&&e0 zGYnE&dbG}mqk>kN%2tMgrig?MFjh)aDlrv*Oo@cf?x6A zmvBsJKN$PYSOz!650&DFE^GM#LU#INZMHHo{W{-4i+^pt?e?$yDP^0Jw|wPq zo%*`2Z1gYP+2*G!+vu15|H6{wFP_Q&VynM%L#_I6xmz#3Xmz&xg*7RC{7<={nsK4<%?HU!qf;k=45?SlG(Y!*Sn0J$TqFZsa9s}uTUYDOY#f<7xHAYShFn%y z)C3(xM6?CRoPQh2cZ7`VRxaZ(oamG!Q!JUv&7IAkHKEYDpg#~h22O!Y0DRMK^VI54i zNwA^}G%{xstIf-nT4bYShD1qq#AO&qUEtJ(I#Mymd4Hl6b|fOxj3?^-2?ytcPkN9H zxI6J0sp1g~w81BX3bBkP#(}y7M>fJhFiac+((Q@_K9hyQI1t7xUXjVeC4(NHgHG@O zqq-1tO~uI+rl#WPlUZ<@c+l#Lb7w654pb6y35ME-v6V}|ai&B1vgcYE^CRnj=UV-l z@0a{3gMYqoEAs1|hnCOWUw&+=@~VE@6<<8{jZyv$BU84+9~g&v=6d7IBTW0Ob0CA+ z3Kszp!p1WDRCxs?8-%TID^Eunh=;AIX;pp~BPi5PZc%*L^ZYQMvj;!&Oy%2wNDC^3 z2~xKlF7@R(^+9q>L}u&YdWfuaoDEZoP*9TSW`E^0Gy<)5g~V+v5139=ZUnD}dNB1N z5O=$F*)op0T&5QqOj12mY)Y>{*=#`~IHPc9}de8jm9(%m@Z>TaJ4XOGIj z>O!XS=&<0P4z6ir0ueR{+LQ;7D}!fVg4`z~5|g-|aa^p;3>HC@PElPZkinWl856M} zVSiAqxq9zKpLHg{*-I`F44w=4wzq5Ns>S^M-pe0(?5{lg z*8fm)>__Ga`tmn^;(zRYmD>SxesKh~EL?QghQbl}-4D29)CHS` z<-mt3a>T^MVleQY#ghUfwANHi7;hAY(B|g4SiQ3~o7Wd12D9M6ZgG$(Y#Koh25y4N zm2zo-_VDY7N7{BtI9*shcJ~o;XUF1b)TKp$=^$K+P$@`WC1kFxY{a&#){><*5P!Dh z$d{U>BQ3V2i%iZa5!}Ec@kkB5fD3%Y{NSU(+z>P{bFFY|s&^u}^A8grBDZlkC77oO z7L&6jR30gGtwOUPjB+E;0$fuc@w?5!i3>US6uEN~Cv%0hLzoUsa4ATN5Ksal(m_fD zNjJqJnmb5hl@iw4Qq9n4nu!LQIn*mvk@bjG6|5`TqBVRy%- zX6?kq!LoD?QjSTGdYT;H$Y8?#nn;=y;k@>+4r1YEQS<$2K_L5&kgS$+GMRFi!H4+A z&5Jsn@kcG@E4WYI+bpr~vXA%slmNQ-J_l%AVA6L1P4G#;8e{^Tvj*^h>?NGx#DKue z0;Ca}!x$k6OYlQ=z82skB7eXXzM*y~8VDqy6G1GzCy<0hpPdiraWJ`aQ#&3pfIgfC z;H&r-9*_#q8L9+)?sh?U-(6Dub@rVqfmrZ?>d#ZoSMPE7>)#%L5uFzxcoCkm(MGuLN-a2+bGsaNHF*-4JS;P)tD>jDhvkdRRdThf&LxO&iK;1*& zFi>f331#;RXjNe(R)2X=|(z7n2%x38};^;T|*WSexpawdQvZ_Sk1NC&CmszFO6n*g*Zkm5s3$kJu>C6FS zH@*bwOMiqHg3;J!Jt|gMWevA;2F4hq8efJUk}Dok^QvdZcmu==y)5`{gZA(1bf8f6 zKwHnx7%Bc`J)H&)pal0M$<_LY1HZW*0U*HwB`l~8Y*AcU*psAbY7mxQ89^4{o}_C1 z#2}ahGp=1*o>9k4vsM*CB)lm;6rw`#;vYmS=6?WBPHPFVs}dX`8I*@)0|D=W$WI_D zL=QZ7?-gQI*bS132SuP4MFrg>>x+AZ+$!wqN!azi$~J6K6czH`<-DpmguJ^*s(^)D z&1+;!8s1kGAfna?ztI`w3tR<&t;m)TB#Ix1wLnS5DexXvizr@!+KbHH!c9iUwCp_&_RpjFFt>F;p$#t zx2iZ3s(%)4YG~T>qvKHeDp%thy~>-WMtNIaIu12o0l%(lnF-N*Bp;q-76u+(#cDuOMj)~z^6TiPbI(HR(_@8-}4z9lDRmvx*Q{uDIHsQ zhjvZoxLwN?MIw&XTzP`^=)h>&`DuRfU`@nISA3=4ptf&RhhU+{1g(t5QnXJ8uq4gn zze=w;7`pF%chz&g=!5uQ^kWRj9{-zuzRE$ry>|ko{DVF7+5@_9+Yi?l2-|a{+mZo~U+=!BDat!iPlRBCA@v!hiZFo}t!>zU&-> zAC#UF(dG6}3Jpk(U=`hg04| z1sj9TdwOd_BI;#EdyZQB@t@bd(T-`WUx+;TsLB{d2Ge$`Zl!m!c=O#q({U*HdG)B~ zfb@2}gtAG-wtw;;T0NL3n{ee&$Rc|}rn?m7+(m8_kIN^Y)nWlu1=R(AB@r+tH}g7Uy`$W#0{Z<+=G8R|H~>R5jr zhfM?geLYvl@9QsmQyni}^snO={pr(f78AC zk6v{0)qnq}rMtiSueWrYSO4F1%I<`T-TcUnRM}@l-YVMiPOavCh98nEJJn>bHLC6z zBizy}dwBj$uPK}FhyJ~$IsK;J=Bj?}_Dgp3+h>Fi$yJ-omanJw7$HT!3t#jVWr}vx z*8fdgRTZb-w0qU|`8Vyft@rnuF$b#d4||NDrGHm#Cxcr>>CM?-c+o#3&#QaIsH*#$ zR;YH*ziCO^j&Iu5pX&S`$EMvL)$bB zmM_1vf@D*v*uAz;*o@LyE1o|jHev})z<17%?dWiYRS^~)^BTC}#~VOjIxlr7AkA)Z zO@I5=9zTiT201f^Ti2Q-=2H*%71Ai*P0x00V=}g-F>i~CEzGoJ_2@6PtYM07C zP|#(I4}P5^g)f&s(FKeYHh)6l zovzxK=%TJ~1Ujf+N7YA**Et@jrv3kTE(H&MKUb*dI*LBw=%MI1SI?np|Gy&`8tk|# zr?61xx`3rbb1+;;1-gIO7Pv1D(T)qXhH`L=qj|{gCQ@hkyT5{gwKS)b$w9{yk0&7=;=K4X_50Q0-4)%TcA; zp9&PW<15fp(Y5VU?NMI>AXIJfDAe<|fKG~j%aNwWbq#9#2jAnl6;P{j_d(s~g3bSX z^ZuI8w*PIrR_}JTOmQgXu)@Tu0N%b`*0+fy$UkOVvl10aRk-Es^>!c4t) z-(Nh|bC7)guXeb--yXYrbJ_i0|M&m*Kh^HG|IdD%?*8BR(J4PR|6#A^yXF51^XIPn zKiccny*cjwkv-CjcQhBjS`2L>aygY58=KCs+Xk`hj~)y(zBe987T@xFV1noEHbW zS(7`n%!q%^t!zH&a(xyoHXj5P;oy3?U5v|s&FZZGkq*dV<$palpTllF>$4_YbrPfZ 
zvFmJ2zB5x6;)tgSW;sl=CR`NV^Ucn6qh94s>gn}lf4_GgLACF*0`~dx-0iI0_4;U_ z!63D`gPyp?vvBhOyW20P%TBj*=*|YK@n&b@q_2fLvqsz&w?P!!v4xaIf#fu88{l|`OeGN=&hvDjRX+r(H`x`XB8BP+7i z`grJkjF-DH>3uHcc4g&*hVt?1vAut;?kpLJvv%&1{(l^o-SfB_cNx{Kau?dbi{oIq zKCTbRW?!(!IJ(}qt9zy!H$K{s$GLo5GFtXJqmMY5tRLD)=i-7;X=`$FU5-3$E->;3 zVk{s)hni~8bF7ZrAFyEwz?SJz!ZiGtCF2izKJ)?&jNZ^Lrq5zaH=b`*q(?>@WSGer-tXb!9o?dSoyEhqcp7+P3H}(3DTYo$rZ$BQ7czE0gA0N}tp4VA?j_zX@ z#60tl-Zlxa=|flTG>+f1wK!Y7kEB| zO-(dhQUteciRsba7nQf6qoL#Bg>f7lv+l=IyJkM#H9H()Gnvnq(KI?b3&_q}JNL;* zHh;a1eF)Q(tixq=-_qK@u@4m7rfHlniY0Ov%+mB3KdiLlN+b1}jhAvl7SYFjaV@8Z zmD!22>2!$W`(ZfVJi^PVSQ^*Pe&ccFbSGOlFO8lcCpg?bXO^>E?Z+rzt=IR{?o{p*EFF(d=;%KB z5Ersf_osbOU9VemiM9P*d#>?fL`dS?pXG;Nu2Zg+iT%M)c9C)3$9!NF>n*;b8h`JY zM7Itivky%$oqh(6qXp4o&#b7vtjXqJ_Vi0NFHey%@SAaWyhgTvjma*yG~>p*e3Ut! zV+2RPc;3oco-8ie@h+OKgmq+m4nJ-mx!-lIM)Mx7wy5GuUM;+uO!Dq}@)=*D_qjv>ufMW1^wpbnws+Z7f>_m4hQ*XURrXcJJVFn9(-LVJ3?SjV|2*$P0TYvdPvMKQ+-jYTfm*1|Sju&(on3f9+G zkNc(f7%h%dV)i!oh^C*<4{asm6Q<44zQ$hKzYw!C7RewJ;d6NHTPNdV?8kNIq)i$p zzYROa(CMb1p7t>^7Be?LBY$&D2F|vgj{E7r-4$JX$(My8i|cgrynikyBbE<0y8?8_ za+wAx9q-G;!bfv=6<2F+kfWFyu~Q@`vB#^U!;N)5T95indW)djrRQm}0kXJ9QJ~Ma zdws|uLiUYR*AIWM-B(>*Qg`X6u@@EV^70|-JUr@lKcDkaS=-{Y(^iJo?_9z#PKU5u zT|8`zMORdpLz+m98h`2dvOM1c-&}r=BBJu*9w=f>fN>f>E(svOZt*tx{_=+5mR%RF(?#j<>!2Y1ZF;&{J^=PoJqxatn$ zVsK|=u!64XFXr(%#>Oz*Mb>KM@Ac!3@CwVI}k3t?Ti@t{)%M$)@*=EB5* zCe}LV8?sGElNW2$^|%44hR^YlFPMnyb>1c1K4Got8GrEr#LRcLqIW494f}_mq+@m|v(C+7v}rLr2_z%a8Zii-{Qdspmxgl{fa z>k!kio_|}<>me9nQ70r>c}`+&oxvI%tBGOfAImV$mSKKbhWT+B=J#coAC_T$i|)-B z`NxwnbzIkoleLE;^+{mg8!Zm2jU#qbUiS4Xo!`1H?JeVRaEZ&&BaWjTo;~J^Y_PFmHpw_~(~PXK4O`gn@PBAn-MA;OsXRW4r!^Q;iJvn6EY^0O zG!ee8@hQI0D;nCY&^_QoZrmkd}uPSnONooX%lxvw&nKb_j-++ zz<*6_MA|9B3EAE@Rq&z7=dRihuFa-G5}#J|F7Q?XufedEeR%PlKNBRGnb| z@X|@T?OZ3xz~%+%3U8t_h9!{+5vX={HjkT)6p3F zwz>14UFj90jx$Y11??O+C%j0^!%PGzyFJLIw%L&S>!WQSk&V)PaVY!g*@63ImVfxG z9qC=?NdoI}!+X=y)}1+BccX3GX|$Ya;Y?n76VN`r+vIc=K_0DFAD6w=KIcY z_At_T-mebNkIm`mmTbj(i>Y>7ER4|Gn|c!^ecm0^!@;h1-gQRfEZ$ExGW_hzkDz}Z zbrys1eLW=5*(`4L(>Wd&T90&A!+(|kEQ{I-&wFPv+n*cB_UWe6RG+kinxkEu2B*`! 
zIeS`szT`WXXlj!rIhWT-QuN|ze-Zw5bTA5HXA4$3LxATz@$C5Yoa<-jB2y%i^kcqT zoSal-(LM~1_fryk-A>vquh-62TiBnU?r>av!lgMZMrPOS@9@YR*jKqf*MDwa@eO1|Z#tLF>XB}*`^Plt?Y+*re_CEUahdI%V_I4rn&o-OJKn&l z#EzxcMqdS|BsD@b@AW?Y#bdK`qkf>nFrSEhvpOvJ^H3PeFr8r5j~CUp+sFNS^UQb0 z%c-MJ9(%UFnfK{_F!Mjc4Sz4_I@1>F+gI~vkR|=@e(vuwcWn0TYGdt>gL7vvSxFQ> z`p0Y5!>g`0>*(Y;N26PD?Yqmc|~$NX?p>3(hSf%>Cq6^(3G4x{+8-rE5i-mH&aI6M{u6OL}$ zv8K&vJL}TzXZ7*9!qrS18bP|Js)pNb;n>=&x@61i@j35g_j|euyZ89$7yZc2tx-K{ zhE#L%b2`1A_Ex?-Eq~1wx(WL@mD?S$n=bt@yEl6=uZEAn(x->J`d0nt9G0k?h1qc# zL1y=>y5nOv%GbA4ADPRPC24obmhqlZvFc7KS?`)*W@?+{bGyGk#GGcqU7LkISkZm-;SKR*t#^BKhM;`2UV-9}OHk>>7$-bA!sc7~7XpgSq%hfe(% ze=ZLzpQdhd)_)?!FjO&AV9rcEspp*^>L9{Y&E99aVE(L&&?foj&Bp$OHO~ZTOuULStRV)gG$m z-DGDzr+-sEKf$%gx2&Ym)NE%a%jkDeyrvK*>z$=b=ff9kH@@)Lt8@(?@2VceCAi&B zi3~mRA-4z$BPa5agh2D)t zz=Y~0Upf1Voj;F>OzDo-7h#?sC9OEMD9|E_&3{^a|HS3QZUwi`#jG+J)AixGEAXqu zqm)o!g(sLVRg%^PO9=XIjgHXwtvC>q0K>WSk5d-Co?AUE3L2OyoIkIdOY5H zAKcFQO>E0gOVP`IoeetSh!Z`tDEVV@}6&H3W=MZikDBk)T*`T zI40Sm6h&vsD&H0J_&nf)7)K8_@L8Oi@c~(mvFkIGy(v7b70wSV=8 z#-F9`x}9T%8y=8C8;Z+@%1vdUvHBL5@}t2|FKxMJcUN<3ZRg6TI`kYyfiuLAGDA=huSXiPC zZ{?NBl$(;IH*zq$&A029iz`-E;eYdIr77WLOh(j<_+?J7@u@NkZ9V1qWv3qv?I+{e z;tcv0yr`#9l%vrki98xVRSb)ALzneOk?H8ZgN9DJ0VxnU97=#RD; z_md-3YU&5fqm29H*(JG>@YDN6Q|rJeQ|9iNKy2FQ8NH?_el;Hakn-=vlz$71SLHuCiT2&`hgvDFyJaY>GzT3z&Tie9`c_X~$-m1mOoo`}^~ zliJ(;nTW6Is`(vrk?XYI`G6{)3op3^_kIT~Yu|;?5?}Yn^YJ9kZgu-y25j~-x_Lk8 zr`nDdCI_EoT3*YQO3n9v+J6Ey58lK!%jXjTolVU)vro<=XOZP7q|}Gq1#GE8lZe~f zxS#yzeBs}nUfiCl&W`ico>|v^&yV*~2i1*xjt-@>Ez>T8TCtp2IMSkD9}5Ad0C@CC z4?y**mXYGm-Y)KGn5E+9LkCY1miZ;w4u%K5ua6TE9C%zuxB6z^gck zKI)UAbWxc~7W<55d+eCi5}Bi-ST8U3Tqt?PtRf#2APxDPG~Z8J1ZAW*7sU^Y`)hKq z6>-?=5R4~;mCJUyGl ztKq%-$FTZ6G_*lnH+Tx$PdB{u%5N}p`J9MiMx-&jFC9$WMtJK--8RD5E#n?%pFX%7 zwXN#a3HH0^?SJxz-1MEvDj8fVBQ1H26=Za4ukuTK1nqXBxaDb$6x&^%2etB2WT#{=xR0ioE< z#=wR^!I^1o)dJUH_2CFr5QkL}aoD?x*TkGJS(so5b20&3qVS%jZ5rv8s0|%KY^5kY zA%v5LE%(aaFNU@kWQiV@-S!_xpll-aHbakqR;aczQjS34W zC*BP`%zp|My%5DWjgin7*#{q?f; zWUG)S(2P&?eWX>y*r*`X7eR{`$vU;N_C|vC@_p}SL|MLdR+hyE2(n#rn4`qPF6sR) z$q{}R-I%!dJVoHNFm0tFO^gAj8izS4`?xK>u77WamYw0MPHUIAYKn~$$1D@}=>ytN zQ@ym~)}F}p98jm~eV#~}4(RLYNsJWhm-FXwA78}qAtyQ@uKAsn&oA@inA7!X>oztf zZo;cD7I(4d=nunn#WZP*Jq_<+Ipm4lA6(FHyjlEymC8^qP{|h*3IJfGKLKgMQ+i}3 z7lkAadZ$<)7Ztu6Bi9@su}?Qq zmHoaZAm%9Iz0sfB^_>L18TVf4>{@tV9wBm-$aPR@Menre)uybi&gs{Z#?_+ctd-lX z?|)gIvosfkLAS1oTYW+$T9oP zciKkp*WPjdz!)J>w z77icv+UVyznc!`zF5}&}1YW)K>V=8bF&Ze%x0;K6d6~VnwHxqj>EWkdLq}BBiyFlB zZ6rH#60A9kM&|eSb#aH+*nbbWIy09~>Pfgi4b$_&bd?WVj%&MfMSeuIPdZCh9(g;YjokM?A24*d;Y{PGP%%k?Z~v=*(%=1dl{?qHn<|=8lb$PO0tFPKCGB zRElvv`!rh5r`ff3qjk=eo4u=oOUSgbHx^@-q0k7slRhh^x1q}x#ih1C?z==TE>3ti zX+nK&g1+^;KA*6>_kVr=Qw-JxgSBVnaP+VDhm|2Ed|bDhbUjD7xfU`y$J>Ge&Bl&e z2gtoX60H)2*>0+VPdH~=!n(Xyp5v}NA9_?uzQ143e%94OT~S;XE3^hZ zxg@Xf*#J#Gr1Jskw$n<)FdG?7AFj{P4WWvNZ6&r?YwI`pL(^&0F zjQw*P*+EVg-?+X$`fCZEX6iH|bB2Kx;=v70Y3sd`YLW!w7hJUQ|VXY?1ugfD8K8i*-R)ak; zozPjk@zx=D(l$EF(m=b9a>!4hxmNMXxP?S?Qv$0^_@O-&gU9&VUK6Rh;WvF@ruWC} zw}$@-V&hL|Z`!pCuTTI_B^)r_H_ZmaOGNR!zP+Dh&^`3kYPwlaGtaM_q;fAj zCVyAr%(S2L2_Ldh;c24N6sbLnf}c;|1u2|8MUi{2vl3={j19T&}-!7s~tVrbMYD{q_Dc%K6dvEfs_?l;EU zMCMwYFQs8K6S<}w<}c5aE)6Ifxiz`v%oKfu7Q9CB2~Kh%*BH~@ny$K;pMN{k30!C2 za_7Nl3bOEoD3-6)a=}mtGqrlP*FzyMHkgD__Ju^1~v~f!mf33 z-zxg~Hd9roq=IqOs((s_UVn}yyAtnny486?C#U7f`HE0UicC3$%IbxHoN{(Q(5S2X zJGbs@pU?GTh|`_^;IjdVCQ&C<+VD4fzf9_T`ILo2aU^FKPqQp;oV%OY+ZtWHN_F(NuSj~5bm z?Dh`3vmsqSHEo;NC7@(rTJCA`BH`|Y2G2}fUZ$`djq`f~w$`3zJ z#+y~i3_i^)i<83^r3J@YII|dgJ}+jGxm`73EPGpVD^X46Sqo-rGJSFyELfsfuSLR2 z&Mudn`)k+v)qk*_sdskm94CA7mh|SX&oq0YQ3)@aChAj|59QpRIO}6zPP&x4t~TpV zlw9BVR`|N#^AmJu-YlJ;% 
zTHo%huHvGyeq^;aWgVlhTn_mJFR>}_T=|4qDX!@GmVdp{`Oy?-558r>t(6^*l(}DL zo=hqfVQgbgjB9Y{iG_>YpKv8D9<3L5?**BpYc!dmIuV3;{-N-xFP#rwUn6Bp@w3FI zH@0=@lsDy%vzf)j)!Q?=EfzHD`q5;8cU@UlVDIRe%#3Qe;Tttf>O?3Nw&=}BsU$p_ z4~_f_Du2`WL#MAiqoS5BdD3$j96hgn1=+x)R~eJLMP9EprzMquN!>}QxEH&R>8!p8 zi9a*1G@t<~ z4tL+;YcP)jzt78CaMW&nWuKbg2Pms<`5`Z73kqJmog^kQ6K!X{gkNAFDur`S zPJi)~#Z*<@88eXUfCqPCza_jmKkhEYYQm0>ec`b-k&<-F9z2+EA?`)4zKlyu@y?UF zI26O3W(TcK01|FLtzz&&sufbDQU{T~%k25pchddQKJMt~e1eEa9UdE7KWQA_HDh8Y zFqzP;77X%{*f+#94@Mo+b@M9P~n2_`Yc?NGt!9dLjI?^BDF$e=N59o{-E{+a3Pg0Q|LQRrL z6rEr_s0yeP0c2VL+CZ>-jRhSoc$qGHeJezRUM`PTxJd>BJ{N%*eii!qCgOu&e18?H z%(<=`0daAMf9pzeX%l~GvI6Yjmw)F^p{vS%##<|COAn$3SLZqYTypZhq&IWfU!8ol zZ=MH{6qS)GJ{cTr$h3A?~m-rPYpI6XVSSVA+5q4dVhD}QPg&B=x} z%oK6xC`Y-F^0#UWTYJvoBCG6Yca*mTcm1xx4EDo{?nByulR#xM0Am|~r*IM7-Wt6< z7R}NUo_E{2p4wb4y-44lPW;}6O1|llbTfkZrpM9EWec!m`(jF}d$JzFP}W z;}=k4MZ<2r_c3*mPoD482Y+|Z88{|N01-ZLuEx*!hT{+1#_=;=*ZhvZ-?&}B%?iiq zxy;XEU6tqkam~*78OP4_#N$)_-ilJL@8Rg#D>Yxpb1+>z^BT4p0K#YR6$@1MYm7fv zmLe8My}VBGDTmT#! zPDTsbP2iNG=e;Db7UvmyaIS~WKZnDsR?9{&f9Y6a8KYXeHUgCZL zjejW~fuyD3;M~#BxI%~a{$?iq=`PnE{@mtROWxyUX+)doV_u%3k$>;boevpN?3;|e z-0g-Xt;daM;p^Rw%a2DXV%=WHvVq@|$F(yg?F*ZX;8`X@9tOYgDXF&dk)QnYfkx zoC{W~8P=554l|e*Xw*#sO+!ej=fMxSyH{YT?eZE>ukl*H^3^!2?sRKsI(UNB3!GbH zut9bobed3$4){a<&Zl&@nYVYLX^@0&7+#T8XQZve$3WpZZSE#8>!5*$$pXgmFEoDGzoH2Yt^Rmf?l|sW2 zx0?~!zU*DB7p(nLE_c0KKHH$3vLTSFi0G#iC7r1UTz^tsdIRnJB2l2!qU2nuv^Uwh8hvON?_WR*@H)T*$(ZGOh?JZW9xS8uX4 z+xBniX`ktyyXjq2^vvL0EyFMAh*mIVD(8eTq8&V~XSHdSYHm@l?;Q8Ed5T_K=VnTT$O)f1&f(Rv5^6 zD%EJ8-F_%`WLU+9Uc$zC2`f8qGW<|}Hh+n2e=Z$H^OPj(o;z1~g>9PqD1_HXcSW0p zMW2Mm%SACVm6<{q&yZ!_z$?jBH0)j^cZU?(*^W9!7y$Uxr29y^*!<{gr#V zQz0!qDz?W-D9A)Ml(5uwZ6t!Fk9A$C{2bi$>@8J9RgKb#*8FR5YQ>emp0IvB_kRLB zUD*`CXG_{wbMgYB2uo{K@1)5*@24%A&RsCFF|uB{e|CF2(Tya?UsaD7{(_Q(ef#HY zZO{Qs(-QlaTQBqZ+d#UBQf6|LdJE^S~}f!sbox>upER9 zjSvr;`IxYLd?~>mrQZ8w2JRI%oWr3hH_AyTUhZo(*UT$w=h?p{^dY-bfDy^r`%qOZ z?@#K^z}Sy0YO14p5d)6qKzijqRm$49S4NjxC-6@^E*e+6(Yv9mSIfSehJRHh08Bu$ zzZe&bdx*_g#Rl4V6_7U6&Zti)xbFbH7n{5)=UPeq51kTw0bjRJ4$X6MWY2o|Y{dL%0{5|xWNMvR!Z2;>%#3B#*t#7k^jFYA)D!`u=|o@DvdOZ!#HPt(m3 z=-}>Oi+qTiK#-gJBZ0D(2=Vhatv-Kp16vWU@V+hcN0_KvpkK0?ycc<#eO`@p_RXAs zbrwd4ckfIncKOh8(evN8`zcp;KYdtUarIx!$r;|evoK?zM-=&qq1AH#$@|9G+Ml0lwzF6N3T0nt|n}o&RZXIo*Gb-Ogj^ zgy9~yU{`Blu)(|5gyzmK89vkV<h3P|C@)&uwAyBODds8blPE>FSeq9W%C)`Powi&gc`zHF)fYfvqri12^vrBne4z*~Sp^|IJNL8-r3qQ-ngpVqVxXp-HT%cUX_ zZeuLE#I&|L;zkvTEM|(lmf!}L7hHbkquzHfEc0D2RQ9vY6cOb@(`HJJaz~QJri4}& z+M58BlcZ)LSUFJ~Q6Y%W^#QEyo;Q)=_NdTvC10}c0!O>`*YuSUYomV+cLs-z+b4sZ zY>C1kK{1AXW{oLJ@_XK&KBpkRrv=U8&Z|q5QA=-znbGlUCE_?Y+za{CpK%Zptk5cx zQIf6S1&O0PZ;dlCulMD%2s3p;&v*YUt&H48%4bqVo^L@#+}E_qm9w-rO~g}z9B0-V z?xc>vf-{_olg2lY@aKQMqn^`4GnljB#)L#V+gz?3+0in~?dr46uW*PLfwd(X%x zr|G&kjJ+;bK!lZ}fs&4J PLpYfg%yyaT{8LOx1KECd^?%{^Q>!s98?Gf~Nx~Gwg zQ#zG)pPn-K-K{E#do`l&JYiit=Jsp6Y)n%qo-d6q9^J4oj*ARcGHzcIF2htNUfb(ukZgW!eZr|6y^90{T{?m+Kdp>s8v(?*8zePt<#y8C(Q@$A;-wX}_j!F>UvNPTF;r*%KZCt*2 z7EbO{80|1cMUpIk26sgaYGSH7(E9N(i11l81%hIb+X4#RD@+1GR42>fr z<2d{&aRa9;A#W)!V~6X+D3)*K^HSmWtKO{yJ05>!aE^=)SU--84EW8!002C{Lg~wq zgz_zQuzgG8SVtiSV6DEJWv6W4<27s&KF61z$WE$=@GU7G~@kZ41ka=27UR&X9XYm=8GETJXitw zw1QCLl78}cMB|XVJ@j!Dpn#Tdtx+e1R=j_WLUwoA4jSBmvC?qZsXenDK!X?^rQ-pB zzffyO`H1^3+zbF{IPR%H_f#CH5&BmzA3v5`#1d!Ud z{iHoQ+K#kVDOoHb1KTiQq%=U8JIXCT4|(ZB&xc4rQh#CO@A_5}c91=OquqWZ;r@T1 zi~fe2usdzH*76TZ<}dt5naV%txi6mb2U)3JAtD?-(Ry(~@TdbK?MyA8qrTc=s5=uv zaL{{@t5Zw>4$Ljc1QBJLDJ>@SBt_j0>@;6!jdsvJ%}`i0{M9SMN=#C#G>UM22}qkY zkC7nAEYZs(2$;rPma1j7N*1LGB1wM{8%+=erW9X70b5k-2cn<_ld>+#odu36FcxUE 
zf9UCWkF77h=Lh#%rZS9vp0UaPj@US!|2Ka79~kGq(CNSR`wI;{f%d?4`4jXaSq$P< z2b^{wAAz?T=210M8mvG1QM6tK83b#@tE;CRv^uHP3vpRX9e#BarRZo2Nd$6&gH;N^LNMxAH&P?D%vD%2NAxraHeMyeUb#O=jvwzSFo*sjC5C{YgqYV#4iPdELFIzhh zA+)?^2P*duX7U#bw}ngjMWliNyaPcKMNjLka6IsvXKxuSuw)PfQtWlhfr}cP&69=h zbG&0BM3PW8M5}p{BCmr4LQfZwrvju^;%>=s)h-Z7gmBp1q5)9TCTM?4AOLeX&;tNS zEflN+&-(KYt(hbrHgd@4p5JgjpS~!g#tMQ!vB4rG5cLWKl8n!s@JO>jLXsi0CWZ;ip4y2E&Z`obP=~xp! z2X@s4t+zT`Bn#o6XNP~99}6y&5+y)q9YlshSjHFs?&+HvXbtePR6!oJLGK(0ZqP8U zwM44a{9RW^vZHi()CIxQFTckb&6jp5Jo-xsf?(Q=MZe&eOh{(x{_WSk8Hw5(+`-nM+eW865N?H5u^3M~IO+V6Nq zd(U9)AqmPtZTNqj4YuC!d&vO7Xz5!fLw|XQ!f1FzWEh@htJ}Q>ed)pF-X$wCVK73; zi(1=jsSi?OQ5a4Tp_j4j*kzGNLChNHOVC&%Q<2Bta>GOJ1bpSb9qo1WfxobV$5@yC z9v^=6b>IiH-{T4S(?6E*A2LFrmldJyMbYP;4-#k)_}+hG>ub@oo|v@bAZ9g^Gx5D7NScPvZ^+G(ch0 z0HtCfe)4~;#TyLZy#-*s;WkQ!fCR<=(Wi}b3ZRj2M&Nd&%lED;S?bM<8Nf8^iVtc zO7MRRZ50FWi+WYG?dSt62P~zk?i$F5qDS{V350K=?BkjSgx{`GWzDzT<}-E`r`IlHGa4 z(I@>slJUw&lE=xwlSIka!~@fp1I`st-xzzzdizOqy7H}gz45>EECTws*ZB3`ogII0 z*F^m&u?Rr&Hw>bBAk+RU;`ocoqpV|2;d{y-`^@TQ`$c%Hmk zwgMJkTo_M5ULL@{0*I5%ohgmxU_2oa0?wD)gHJ~oVgn+fA!DUEX)4+R)%Pm#z;t;U z6p&^K9&@)W3`64>_s0SUNun!oi3)#-NJR?-iw2>GEE|%y=Dmq;F+c_)FxelIqr}}; z4`<>>58AO_$+o}duVwUS-q8Ik;?(Mnxz?exto;i+ToS+L690l5{EdjyMeQ$y-0CI& zKpOx4d0;bt(1xw{_kQE?FG%En!H4_dFWpIC86^ppq39z^@;FC=gQP?#SCN0_!hpk6 zVJH-N$t-ahuGTNxc1HeR+JUh7 zIns2k?=vmXvS^cpqu#xMPN}wYZ&_rrU$!RLo+gSCL72T4LgbYhed7(H^dQJ2g=lTr zi&U{6{+f3${W0$@jvaQt+Y5-lF{xVm(@x)ZfdG_B=4;vUTpoYWOvTS^@Y1~qXqn_k zE8;^wl_z6W%%p<>aY8M*MGnITNNVhFT5(U zCh%huUzt?orFNqoHb;RE=PRD>MTRDbP2>nmDT#V?i?$wuru5~7fljP@F4FkRj$RVb zTre@2ggoOk)tG;^c)kE4Lj}rssUZn|%lJ~Cr?;fOk+cozFoMJRQm73~}0`NAvULqaB;bSJc5~BeE)YmE+ z1%MbPPOqZd-E3jeAm#TmSQ+K0^Xq|Dd?NxL?l7fq^2&efu*E$~#~$L#qD%!K1Lqdi zsuDx7X8EuG;8+JPmVT^fjIUqxJHPj(AD>9_2O$eJ@|W+`m%tzY-Os!Kecl`$6>JY* zkjTJFf-xP3$N@;Rl?~`ezZv(R&QQEZks||mnhGYy6wr*#HH(SyjTJpTc5aqk0A;#t zpn~&R!bg9gR=1m}fWc5+w;e9>QOj95)*`?C%FE$@1kqo9DM_%EL_-5>t!%7D7+ev;w8^C%Q?#GA6Jn!FLPuk^j7=EsYpQ`}o$8m)5FaM3AV#A^s$NK8WfAd{eMPPriLaF&#Pv}S!Y^Vhw@xE)Rnh`*~ zNP*~O0($T`Lbf8Tl8R3%#<*vMPm(HmqY45Gst7?9N!?+uVa;|R&BO?O<7nY`%wMxW zYP@6Z_p^3^RtOf|290Gw&k$-F699^wV>>I-APxu+0B={W+deM`@OBLUwyLh=j0o&JTy{xcW4|6tuq%0|-BzyCF7YP|Gse#|c# zSr5JA9ZTfD#us>%Zw^?2WCl99h|ygHQma4pCQ)D;CrYTdTB|=T;lo#j5R8eT)j&cL zl;Y>L3&7LfR_e7z3f_2k)$?Kq`^F%=HyD4%4tQnMQBOBT<0F^-rf;)O+b%&C`+auH zn|?mUFUJdj7p3b{GJNpTGpf%Z8mlo~kl}L!{GybzhZ<-v<8ayitDfFK^nmLf^aB9D z@7vEim;Cf0e|+dazS}=zZkF1D^&4BQYQA;X4&UF=$DqSMQvT>@k!a07{C5ATp4@-w zm@i!9S1cn5UKPR;Km@Nq@)ozoIv_bg7Fl zO|Mf_b|A{(BAvyi&jl2pV1p8&2Q=`kOk9{0EE4U3AyL4U`9B(i=5rt;D-=5B1i?}$ zU{suN5Jle$MP#v#mzfG2EhvBX*gxhs#TVd=%I2_5={qO>rL#ctrJwoMp(7tGy(!n^ ztQ>IzWF=z59O14PK;uF)ZmC1m$3md#0lMu%r5Ta;MglpQ?H{!E zvBLlw+jA`cTa>34njdlb&T9e!2UBmUxb!1eO8BUK9{Gf%8$4;y|#Ll6C~qF$9W5 zwt+4QCJ_9CYho|fetg~?_x~Q-5QsK=1;!Gr!?H1%34ID}R3o9015@4&KMtPJW}w zwMA%=@y(N94m63U{qu{*us=&c9M6BdZlvULel(*1UJzLb-_e0w4?xTy00cKh);~z+ z(qF|tmLD`ZSblbz$j687_Y>wg7m~$XU;)$-)ShtIwLk+)&|uc|ff8z0 zsUbGa)taHzHtnXhW4j2Jrt#px9#{+Hz2J=FIOgKCi>Eld;~kDUIJNUWPHebG*BH-2 zdJrp*KM6FduQ*TBbX{DzD~V`-FB7|gWIHFuCwu;P1c`tDt$!u}=7H#wsMB;&?^+=!5Okiop?}U9I&ZS6g zH-I$ZKo@_Hf_zg2<%@715fg(*D>V}c-hm};EGQ9&48O7C_f1BU@C$i-;1@;X3}TN& zUv&HkbJ5#=pZh=9yN_*Gl{Hb&4@3hmL=*456ZC|)!rP~>f#zCk?_=_0j;vd)y0arh z{J0<@h$$exCYA*2OQzqnVlYYl4=b+oUuOK?=IMV&Ub}?%vH~`Ql6pUZP?R%wv$M=@ z{BYY?;5Wq|l0D#?nfdbV$M5w?>dkTAycPL|VehT92LigJj1*_)F;SOHph@rP{B97Mo4imrc1_kyPcISK;1ym>PP z{u_S>?l(8_9o%_&GsU#C7dI(SGk$q<;xwdNfl7%tDSGqkUm7|7A;0OH(!BGO(k4l8 z4jPe}n_s_U>BbKQ;ul;0{WNw;p!txXc%`~|)RL8?zNn^~*yVl^q>&v6v?U_&H}(JhL~UNfiP?Q-S{5J%943+o{v5L(k@ZvB-bM zxBlO3@D6DdrStcG2&YyC&XJIDN#AI{nT; 
zUC_5+$L;syLdrK^`7Z~}m;X$U^Ec7>L$AMXLh*-Izd1?ihi1P?#vgk9ebeJV={F(i z{m|^pdC=uIJ-zAipY)rIEPm3@^I(4H_vxSbb>Z*)`s)wBzW*b?PX7bHe*EUwRp%ec zclqz+`%C&wz8C+*g#R1bp8kK*_IKVa_AlO^yJ>so;HhGorFr^Ah?h4<`9rHqH!u06 z)uo%m{GrzyRpr>9bZ?$5>xX8)Y5N~~{iCX!@RNS$Z}LO4JLkdt(C?$Fa`I0)Hs2&L z6;R8}Jm;H376hvJUYi1%{kis1bf{A64>eBBGxMBg=6SbY$G83ZQqq6_p`0;#X)xON zTK+#I-Qyp&S^8TU%Zc8{=YhoyUqJg$N7^^1YI}f)w@qY@ppTD_bdM0e&7AQ@TZ^iW#y^UFK$*|vgeyF|A+m# z+ai;>N$2;@pFi~aOnKd3jJEc?%Z=&$Ek>o3yY2obx_X&e>@O01`eEy3@8?|26zv5J zR+x--=Na?&v@d@PAK%xk-?3qP6ZB5}y)NO*{SU98*V6f`Yz6;yJy)yWyt?*nXU+2; z_uD%E@bg8eQbj1d|7{-rRR(G5ZukEO96J54asQTBXa3Xr?(2koUyr@_&O>_Wm5UYl zMPc&I-xzRhp4oo-msomn{x#N`XLg>diYm;X`f}&Ze|vvCcUe;>YQNif``5Yusej?G z^PZM7wbxeltDMANV*O^#3-u34es4dVXRE*aBY#`R|GPSD6aS&E-1XyRl&ZV?;Wirz zRy|Wb8wTG#BxfO9{Qb_~{l|Y_f5LCsjK0?wlhH8YL>l)_w%u9-({P-zW6sg|LwQl{cPLwxbv~=Rlm$!|KFtF z{TPboX=B&V`};o6Y;$w%`nTO|y!zWt_kA6@)t~)yt8-`W{sRZy<gnsf6nlKsn|C)Kx({$kv}6#DgtW&g(CTsikA zK6QS@yMN-5ACmp=bb9ieW&d*M8S|TEzd3Xo|3kRn9J<*5X4!vY&)+QjH}?65aR0_a zKP3C#>P%k!X4$`_JotxY|8nTmZ^He{p%;J7Zy{#QOS z{2!M6K1M10&9Z;^_y`2#;U4%Uy<(rJFA^8*OR6s z*7l29y8p-XTSg%$DT*mp=4tG2eaD5mf2-0;Cs6NKSMX@2{#^6+*`h>i;wXyw#j<}} zl=6#fKPc+Uvy0aB9b{MiH6Q=}$as$U_r5IW95VH#)_3bS!7kX`Jy!Y6wXak`Rebd; z*1q5K{QQvZy;M*XO>>MC!uDTuyMNQ{-9b_R8* zf1uX2;_uY@Kcwqa7t$&9kJLKy|0{oLy`i$3GH1DeN16-w{>1llQ9p$Ad8dD=X;!)^ zp^Y0{63D9Cr^Q|FhivTng3ypP|vef@rqjOvTBSD_i7p4793%lc+{zJY?r{+>W+pDEh+kdUMmf%0>jrJ$q zrhcc({O@$@`n|l&YSypxMmzsX|Gm7n8^4#oDEwFYrE%P!bnEwVLDv2&9kt!!S9;1D zzt(TL)UR}z*4SUmH{5@&6@Sv9ztY2B=Tij1oihJ-q&@O~nzXmWKU9Q$KL~8m|0B}g z`#Wjh@4X%VChao@{u^muSzpp#JCmAwMc1OqE@*hP`;4}+kaj7K+S2gt3v=KIa1^<6H-6Ao)V9P1SVHS*j{Pm3l{Yfh1=9ljzU0!a#q$Styq^;*K7yu`!^R z5IH#>^a&pHCMF6}LT>P}LF}aYvhY5krb=UU*!ZJ>pDp4-wBR?MGa6G#&K?wyJ#95> zKp?_rq$G$6mDaKC^zJ>BR+o0x}@N%*oi28vW1mbZaYPPh0M^ zQdikI%4=#WauYvVv+ zevhGF_;s!IRDPK3l7jT}-KTt>(R@2Jn30*bFn)Lg>ZpGyg0M=1CH+WO?CrtTvnguh&$m(Id&)1t)U>(3JBLZuslpBdd^-eA*=Xhhfd5CV+MjKo| z`tVo*UE@#^z$#*g=VP1G{`6XxefbH+PU#<&U3!az@B6>~vG89i!8UZ?IXjVl+9FrDSW+G;tUp* zzjocbCZ~6~iE~z8SD%H8)63;F8r3jxlhw_!TO1tTAQ8LU^h@?Tm(dWvjJCuICmW)u z851=LrSc*iyZIr{z1Iz{x=G+{9R~>trW6EP8a@hM&=Q`1DlKp2qIF2_NmcN!sKzb? z8Cid?b^H>N6))r#>YT#h{nun;9{0H%7K<#lc^|xhVAwPD1{Q1*n2&o@Qq{B(BMt2r zy2lWPJh>)L>w}v5n%(dZcL4cgEDBYoE zrqBo8Cp}a6ggz`^UIc((Sc}CORj{ zh;n+8awUbPuGhW*&XGcY!;zMCnPkbM8goxH{*UfEXD(xt$k`H#y_mNe2d7jK#$6FAs{UPvD z>2Nw-Xc|u)g20{oTtKIe5VF_+eX=(F`x>Lxam?wZ5^sSvKgZe;FPe%Ns#Sl3E`!#M z5a)E*cMj(si^Jt%3a99T1;wkJ31r{q@J%ZIz#@jKr0llP+XwS(M1I!SGclds!4}KD z+LJ*%qAI-8PovWStmU)}yX82GnZ=V9>9Hr09XHEUku2C?UE0?00heXVq*S~E;2>tJ zm^2M<#x(cMplXuAX%|li98rJ9TT1q)jikZ)0?U(<$jvR+xO(oeI3);T1w-4;l4~ZN z>ZxcQ1Ti`-yOmar;8vFF;CxQ3OHy_ERb5s-PNE%CAt1kB8SAFh%zMgyb!#0RYj8U5 zlo6ObC$> z&N3%e3Up(65B>6XQ%xuyc%f#V>w|ZFq;xOkyb7LGpIN7Wy&LpeTfK-FhZ9l-@E4Gl zMFn54*VcL$aq8s993W_Z#4dtgsVJ7$P-vU4!HLsSVBLu#p?O6@aO0vqp3wX97l0#Y z!UXdoB@kn0c#N500f2uv!TFTSi9?)egfD-GO%6xjg==I(~+`G|q~ z?9|HsL)%wKCcyk`V%TGaF_HiyM%>RQ)uwPSr@)di+O1%84pvTA4L^D8ma9V3dCjV(g7wgUH*)WLb;z z;;E@M20`)!($f*jmfAj2SW00miRmE}vm}nGmACp10?4ba_t>u1J~v1oDC{Uy-lJZh z{a!&KR70UwL*ZPDo>KXyTk_mRPQ%az&pNTJ2M-sl%F_>>qB*s5KUBeSDwNVaO;-q{ zNw%{e=d&vXKkk3}mJmfuH|z10e4gg)5{}K(eQ6kT^-fA!kM{>Y)#B@#y{AFu8VzO? 
zojz#39wokRB{hBsI3mcOFl*ED5g@NNBnn|kXl6-e`i+PACnSQjzI?=7;y2*eAL29gFd+SI#!bG23C@mHr4qi5dlYw;WcC_wE!tWPEc^ z&!UVK+z-=xzTc26+s|Dg~u|EQ@vrPf>xJ5~HGRm~L9Sp<3 zjO9VS#TUYBfLE4bUu`Pq5uA-@`sPv}SfgUWdT)PbA!|i)RaWQ|^`D<+1jPi@)s6aq z>1E=e+J#H@fOSy$sIw90f>}T1daNdgg?JAoYrOWxPs1v!qhYH1NM6m38WIeC@TL9G zixL4I?o~?`ta=wH4)a2y!41ncR`C3>lW|a!=26Z3F;ZOgBxD26-?6VRD@eR6>1h=; zLPLMaYh9siQfQKR6YH5Ytvru-y2H~Ub1%(yh{yYt$>?YZBAsIwx90#@!P9(iCqkN4YslZZ7TKe#ZE5X=d< zb{9mB4`4t7b7hv%!wR3AZ)Db#TxJ?2PmX^#y6>oJ>rjPLxx9Q-e8l~n5Tu}u(!`?6 z5t!iR-1BK;PA^FNC5{1@qI!M|WtDuK9mA8%qXub^cB5u8l*hEqVp7*!N6uRa-O7J; zoGhAIrhG}bh~jL&e6HQZ58Ltps|7qwbVW$Q{BYWMnmV<>pX6D*)(|I~2{0izB?JJq z9j}DlMtM^@jBi|mo(>Oyd{_16UG9S*4P~YptSvX=05xgI};}npE_R z>6YW+xYosK&%sR%C((*3$yT-%dV+u7!Q)hqWv(E2D{i~%NQ_SNl#g_`3M+bdW8nx5 z-j3iU1zV8PCifK!a@X$34-@4?+ysgJ1yK2*5@UEm03o0YWaaL2(rG@d&n8N&H=F>} zLx7)rpO`5EkaR)G>&Bdw@`;p>S8pkTEe}&bM8#FyLw-=wepNw29`ph~m#lwF9Yhx2 z0Gk~yYM)tJ(z8RRz=nG)@`B*5d5zIVeF#f>Vb?HfC~)-(cs<{jNwLFb=&!=Ax+cE| zZD7XKW?rhp1Vv^p(%fxo=AeYPQ2n2%s6rS%<+c-bBCIWL)12K6W-YzPCbsX zs)!_}!�K1uW8a#qR4YH7A>3=-E%{~TaK!~QGv`i#u`I2iRtIv*quxn+UAsPKc9 zYawdbl_pQ!=#>VaVP4oa2;Mg!KLWzKg8f#eDxfnDkX2ox;OHInhwOhWT)oKha=;uX z^w7Ex4KcujBYZN$+um0u&LL&eo6Ui3mGC z2&V!l^+R+M6K5h*irM!^02uL5?sr&>>)J%pk_*?1xYAMS{sE9LU=7+bCILEd1D}5O zfzy5}(3-#lj7V0E10H`M1H)caHvn^ds`rtj98tuGYfbdrBUYB}u;v)ST)}(NoNgpB zd3BR(iJ#ucJ9Zi03t~JV%mhX01$2^M4=6SJnIc8?6Mye76B9}s$w}X05q59=Q--hn z*zn*ym$ixq$OWZr5<^d&JkE8N*>@u#&Yxmte}po_jt`qEXhwfhDGE5|Ik3W~7Dj@w zuM|yQJD=t&nsx;#T5XD|XQ@3*3!fi;)kg+Ea8U@7n9w043g8|HY?MfJ|)HYKtmu6$Ul#iT9ty|UUD|~cqA`fIRek&dhpH+k)7C80^C|DgB$Jf zMl<|GTZCp$Js^Jxls9?7wnj-zkS^(7@1oZ6EA!^le`cQeFq2#ppDp-0x5$*oC{KqE zs!tRzB2KyP;iecuzpV5bzCh_&WrH%9;GI7orG;+t@@jJbt&?TsJVT)be8c z?*T$4tFfR*U;<2Mc08X5GQyNmU=#V$14^mWPK+1Fo_e`jK%z=|NHu&O0s!D6BGWNU2QjN4;lpuKUm1~)-jour zfo*2JeOP}HA2_+d{8P!F{T2av-@J1bjH4*=m4H+~=kQpIi`Nn2Wkyhy!?CfCT@!dy z^vT9Xf7o*2SuMTQoU$~2nU4dj2h$VWDjChn377m9@n)B8yvBXlt$F&e8Ik=(R~C)0 z%3{T8Y_U@Mi26&`M>599+`GHjCU1eFlHB-1h=sd^Uo}rfDeQ7*8bm2$AJm zHh11k3@tvxnnSqo3*_9b!}Uirs5L$h@Fb3a5#9jK8R-13YUE60TXT<~W!SLdt)efnINc4NVu{N!iB}%|~Ws%%3f%*FB4W zmMp$(YcfChoYMswlG!`NY%(}E*v}a^9tO=?taro;f>h?(OMRW!f*#R;xG%7emU13m zrSnj9V6k8GVF`c$b(lsNM7&qyQpG1**Ems5!1Q(~Czb|5Jb23Fj`g)XJWqvJBTRoz zegmtm4@oa3$0NayaGS5=X(;-rG=r&P@qOxIfdTru-JjaLLE-2#@&?1irI?l?9t0oH z3VTkZ^Ujtgf5b`-rH^J#_&t-^r{T!<3muZnk$lXnEFT8aF2iJg^=m|+Ap^_RKzH7kocgT@+t}Ir3p5zEHPn_ zX+~sfD;hDf+u#~B5iG7x$#lScp8zo79xyJb7PC{X;rHtSTfye_$L9b~IUwL(6U<$A z%Ss;CdXvgGC4d={I^2~}^0xO0M-=?=WLS;%qTtDppP>0P;1KYEGgUcAdD4F)Df(kh z8NV+i{HkrpBBxFQyN>QWYR{wDb_7Yb>simh`OU*q2>IP0HuK>1F>s8LFYk3OV+$ z+_lP(aU+vPoRMwg(tL^$bK`i6A%<$QLv1^Lsg#JaYeO~9AzfyfW)4E4tW*!)WoQ(&{ zth-WGQR2MkCS_^5wGclt#BPK=@8lBg#WZE7p%7V3L!aptF5<;=JR^T0TZ}9O$aK~o z)~76}a`-%R0n(pMP4L_bc&Kh=#Do+4v>_tjo+J}#m#mUq(RwiJZbW1Fi9ELN`oWPt zr>qMfuDYs_#NJ-8M;*ZvA~hMSl3=ttnkG7q%7?HFfk@0f79Fc8z}w%kF#VtjDFrqXFqZB>D%eZz9=weuXMHdv zC#+;9nTiZP5d-0HA8mnjxx(zBoB9xRCbT{3f6uPp?<4)XmuCKt*>!#U z8@p}?f=~Wo*Rk)LllA?uo#tef#!G+L_0;;W*mdLcU$W~(^Pk!Edt_+JXM3-@X8$+4 zu08+Gu5U5}Bfr^oB>5Y=F6R^v{9)Jc!7(WM`%>=b6EA;M0{>>$wa@oW&o8^~1fK74 z$ZvN2_y=}<-3j#%yPkcm_Tm@2?&g#Hn_Yi>k3_lmhiek$oy6jAcHIyE4ZGg@yZ=O< z)djD+66>9#`de{G*wNbXs%rE4#Dl1_0WZUjHv705i0GXwZa>^Qb=p811yHjZ!V6@E zsNJ^VlM{c;$&c*8dvQ{hiR9U^AVgdlSFUDD4iol+|q<&?v@gN*P2v)bSYFpe8rn@Z`p4Ytdu!pu=e59!U{DwaYB)W#b}VbN(H#xc zcIkh)4!raL2HqSf=1|IYm$+QSf*wQ`>eQZ>OV9u%J;CV|CJKIMu#zuLgEFbuFfs9X zw9%t9#R%Ucf*ZO+#)V9BO@TAIM7oga0l|`rKGnlka9~sz$Hluf;KHs_E>Ue5hmWNL zZ7{dhVC2|$cD!kpVzO_lq*Wg?MW={PjRu6TM--@xF4Q|4(ugpxn}SzTUit}hi3G&!sZ z1GBmaZ$huC9ZC!PV$VKIC~u=|4AS@Kk@r;tMW)~=1awhdCKY?B@cc@@Ab0UNoG)?R 
zJ9Q||5*IsOqzBd2i+R!g`3{o~`#i}G^*W1Bh6Of|yo+Ajg+F2gSUb*>G@O4J6@L+Q z_a$C>tAFB6B-hbIw-~3+Ch(ocK7eNyU$h6G^o}KXYWQ8*vw`MH6W}h(3}V1OxdFp> zR}OCaJYzACPLYjfJ3wkE#*MwYzrA-kNGJ0iomN&E(H|ehIn=}b3yAj+bx=ndp7g5# z$_cYZec$cnh{!%9P#{xkDrJ8pW^1cXzOg{$mD90+sW3IWqxE=er*nMre&^M(yG9T- zy~)g}46@8U36lUyhx<)CK>5HCO+f9s16OO*i`$B2SdHxw2|;3gwq}ufH}ffdkPH;C zPR3lcdAOdUmD-4<&=lD+Dt2}~(2%8IQ8-b(-_EwZr+8a%@Z42M+Qolqv)p0=Q;=nG zq&V46GGZxQe!NHG_1zXX;FDzw%pedMW6-`r_eKq$Cu$+GgNr;0gbyPtsYJmr4 z&^mcfdx_8C!;cY1U#g5~u+%_gMP(anbSV0fwDj$^6Wn21pXF8x*YCjLMb*;C%!l6K zqs*(wd`1{=USt+-&zJf*_A5nwd&_+gK97x*iCzsh%=zSK$E{}DC{v&XOM+X&r9H|x z#I>X^qcbB=Bd&kg3XO#*QfH%dD~$819p|7f_h5U-9OZIOaZ*KjEeJ;@q=ykT_QRKR z;aHushz~UB5VmoH9VfT2e%QmO7woLl>#*)3V|tW(UMuDCShfBPAKN5=pGVMo*;I%W zuPZoRn0uAZQJ*7jp;A2jY|FVCFH~g95;fQ^?{;!Bk!^q96K^wVUHfFDMoRUu79`yR zv8FP(2``(7iW(N`$!invD)ugc_tK8cA8%jNtjI#(4$7SVT1;=$CLE+^jR(bJm=VzI zY5jAJP06Zd5;B4}Og)-Ot$&_qs}Htr%g&=R0F2A_Ccr+DAF727+(V2dqRtq*kG%@7 zfAYOy4kv$YBTG|%-shu)Y|93pt|A)9s1WaRSKiRPt^002d?K$(zP-nt0kK4+I$Ehb zxVNZeDV zTTE)gWnwISvI7g7ah_AbUVpK}^P?3E6#`AqCJcX=8sokv8wl?@Ws}*%D8DGS)HsA$;LOlfTOG2@m0PT~OEm7nzi>hg0T%TsNY?zs)HG2AR_VZ#G|4K*%x1U2z)ACy@-zk7 zvNC_uOzIWIFmdXGNn{WDSn)LlbzMY1g(jJY3sgkLZ9dHx>H^D4M4-zc4#h{+GUeSh z_{kpaXLH6!o&iy+dUpvF#o?RW z%mB#_7FX)e#Nz`IHk{VtwC8x2$P6O=PK)7b>E#iBz!&9bSuPx2KvImt1Soy@anR6^ zo$q!Cl8$7lp_-*Q#hOT9b{lNX$Y{=Uv~QB}3dtNz@hh^ryyTd_K!+Ksc=5$ce?F> z@@Wh71spJf3aGDK+Cr{#?PTW8A&hb%rK6JL^YsL=PT}&8&5YIjQE?C@p79GF9%u%k zgn#i~Y}c`mb6hiqN(f@l4_hzGg8MH*UCX9_<0|qXO1%zsx}D-E=>`qRve_ho0V|}Z z1?$5}W^*=2k_07ThpK{8NCA1is^13ep~Y#_xXt2*a{{i)5iX3V7F@b z;9!>Mi4;s7SNdYa)PuSAZPZ+i^B1+-jMbp2?v?N1AKeV;Atx5zF9I0U|1wKEXls^}MZX!Xh=l3va%Jd?|qynJH?+vlEI7Td$(9*(;z~@*G5JPvRjYAKzT~$XY_CZhE;0 zO};_Q>}-ZDHsuPF9FEs~?)}V3=;emVtFycnNfcISj;;*aGT;UI8v8ESUroK)cB0a; z5myKNGJp{8j`u4RE?1pH(m+Fh_JIO32n9rcL6XM@4(Mf7 zVP7)%0@DjZyw`7boVCK+^ZQqJJd-HF|2OP7a6b%RQA|Xs33XB&K)s*zw2L-XLX5d9 zDm7ayMd>+(YwWXdMmnB_taxwqDv9cz0yKt+pCVgm0ump45i-&&^HjHgG4)d^_kS1! 
z%yX!i`)>yUqs?buWKa5MZ~rz3*v)jC*b%TCMLF75?3%@ftasKdU(rFdMBOYf&poFo zN1o}oC3J{&wGwqWJ9t$f0(SgKr#a@(fuyz~rtyrbt)WYa)tX!nmkDRG*gI8^G`V7t zOz1nVqcDrJm?t|#QpiYu6yD|uUQmfvq}J6@N{>sn_ILJO2UJ6n!b!(^aNmvzF+Ixm z70RGwDWnVT7wavp(Qr|_Hc`9o@{5$w#hVkYS_49sZX!XZr~=Yh95w!-~5n_V+qQA-S1VX2)e*yt#*?l)^5-miy$bz#95f>R63y9_wC z(WAnnnA4D$M(#DhIuBO&v}NCwE`$!WlRKZ5gQhhdkFAz#BtS1*lBDdY2CM zC{Cei+UAfzPYDpH;9GX{UO};2ZT@w_FA%(GaF77O+X??n;F9+$@Fa(61&QhL5YYBT zN^z@a{^Tr)R|Hspd5{;|j!TF;nK5LHdHi zz_VHg=Jm<9WW1g=zW^&V_8+BZkdFGaSAi#t;!F@628&$sKPR&1S!jg*WsjN~9QG77@L5 zP4C<4yq=m-2drsBv!Y#bpnCxRG~RFvi5xjnAh|L7V&?{F3Y8hX#UXs6Px)!4HQqs> z5v@BLcZmIe?vZB_;;#haC?|__-b37Rn%R%=B2@9?j4oygD90IU-IfoFUJCSl6v6iD z?M|Q^+6k_579J&H^7bb1p9+jSfw;sGUADdi>wfk!6PqfDs^4D72%=@DG$lYxNJ7+} zsmxe0Rt;6I2tUqe0z3i*OqGV^Y3UJg(;!#x;*(o{>8jB}*)1KOYiPC5Q}_}EX*2{u zP?a{>gU8dgmXch%ls7PKqnLtyr61(%5?h@PO9T$fc&l@rv|S7!&GfRGexR395K~CtoFh1 z*eoG`C3@Onr|&L(s9wE~6|60j3w^_Z!6g9URw-Jd;Aift1Z7I}8EqTp32*|_UC z#3yvOcOueBYJ5Mm6a2hN0y0+(p}W49!n} zH(EIRBhayCYd@LSbQvBXKF?QgrRvU2F&u7Q31w~T%zL(=b8REyP{SK;YV-&f66X8JYe29 z8%=VN87a9%ZP)%7jtCsDY^XG_>9!+j`!h4-{h3@&D)iZg&y6BKyM@1oz1!y*|7X4IzeY7_D z{9L@+dn^nk7n8gIzAv(A>p^iL9#sD3DpW&~5VrbbQ=4?6C>$L1So#8;T1xXVry zB^5=L@iC8uo}qa&c3DR<9bu&lci=cDFDe|0yuvj;c#2as)AaS?v>vg#QwT`?GYN%i zHz52{)@oN{q3^GE#9aOPw$ZbH0r1&n9C}moM&A6;>mz+{n(DGu#`c;f!uf{r+G@55 z8-MKXCn@!(nT9q@#FT#07p%sS3}II!tJ+CQZGeW5q4-GUYT)^uc}zlD$|Qy4dcY0p5s(3eC%BlsY|AR?KwhZ2+?Y~w}N8j#r{L> z8mk}jB;T)aVy0@BFQJ|piM6nlb7-KN7lE~Dklqv4qnKIZZKDSwV4o;4(!(pMIm}d}CG_+neDOQuiA*p4>R*2EfwJHGqj!G|ap+)}E!*ZU zqA?B%t~-;BngEm zyvV_?*#MMU|A4p^h!n#jt_yu>2|B&=XPJ_Dk`7|w1{nvadTHQ(RoX)jiu_AX;MZsx z$Y8EP3;9p8rgs0~FB^wlPdxF21VRtifB`)|IMCaFnw7oY{s*bI73t4m8-ov1Y)2Ry z`H`Ddl!k2!@MB(N;N`pop87~wYCgNkS%_c)Lm5X^DK<$nd;w$&YZwf7$HY1A`D)zV3XB!mzG{C~Iq`VeIU zfBy<@_r+Cz>YuUTLZ&K|cfE4%BN{>2M-La*3ZOO7PMHmG6h?tKQ82%T1hY!vBXDJP z>)2&;vuR^yB_GNI2Zo@00z9io2>iQ3BULRVA@KKffe*;-nv}&?Q*eZo)`tnnZxY;^ zZYw1ar)wG43IxU`3LO_5YR9Qjt``o1e~PN>GVrZt*}>lRRcatW*~3#hH=)K9OsE1` z#{~d12p-DsRB7!}ycwyj^bB$gxq>KwH1ZT6XkeX(jEc%O3-GwB26gqAzODc}m0N$F zE;~EQoHDXw;2#(TKJqBvY~es{Z$@hSt9^g7Yy*KZu+A(kK@ zZk8Ad#H5bJgKPIFb7S$X$lGq6P{RgK z%(!y60+9`fu1<)xC;~_Er)gOC88FuX9JB3P~!AXY=qIkf4&c_2rJV_ zJa>$E9*-kuWRuxGmyNI>j|l>|l}5aYOwljj9b5D3vrDh2!gob|FU>eiXCbK3lbhAt zE{@%5>&V!zbSTwEIaHM>DskCe)lvCynP;+lbqRs~ZT! zKjcr~DpB2o^$aNWKnfcH+UUgC7~lO(RHzheRFXkeaftgXaLya$#U+}u$0fAJOcbiwWN)r~kB zD4nvX5tNB4*WqQdB1m}xOV@c?LOc&#gk3<9&83+JNVvK&`ldDNv}m629_#@&WJnEU zcI7s}=naUvZFv#yu@tlO+@71x07u!lBKxM4S`3DMc;xn28P8+;&H}Z)%3zft1sV!> zGy>Qn2asrb#Zq8^e>%_tQoB}qkG(|!b!;IL&O;^Bp&v+~q=6b_1}enXs`}OY1|X!) z8RXKqA1p&@K#W<)EC8Jdj(t_~sqX;wqB646=~utVc%aYu;G4hFAT^X<&Lx*&tW@kS zE;bNqudY%#Q(b=UiXz?Em6IF3)O*A^YHdbb>?ksEJVYYde~?jOL2;WJfT(5E*b#Vo z)ELrne)CX>qo@G64tJL)zz9HbilS@mt3@F@J}nJM9ty#V3=9k4(>`fl!I=~GTt4Tp z02UsPsV=h~jgiA$2{WKZY<5}a{};-iswx=!HTA>3qQaSg{)-wK`%u=7KMdPQ6OQYY z!4E5YHv55Ie@t$DBtz)z6fE|xPanxOP1H{8^$z7uFDiP`Yz!q*0KF>B*L-zz!vONCyo3|LXqHWHv5xB7pgRa? 
z6?*SXKr8Om4om^NCZSRQu|bRgUG?Hbhzt@lpig#b2Z|vb_Lz(qe40|IfDT zvHw;H*kc*~Rl8wNkzwDmP0zfi#>s~|Q0)b>Fr%5L|~R!75_ci`#L zlPiA_!=eoPVH`Yj`#-kDMpcOKnt}K-w5cMKe^X+Wd~f!SA}fF2mNeNwGKpX)r!Tu$ zy)eC+QOQ4@?$>?}8L+*EfJC(|&!GZRO+{h?u%T!ey>eZ<|;rqTDqNe{xu?FA&cQNe(HH?5+r@@Ocm-dy-f+DMzSvP< zcB!DXQLeDqpE!djiagzq7=SZbQ^VCE?}4|t0u(6)baIVU%c70Y2^c%QaCAWP#T+}S zMjTbe*FWSVVj(R72*0j_hyR5Gk5~@ep^s$N4}Z5=|F>o1u_Mbc!~PlPqqt!{3J>#9>>9)u0m_%&RDd+= zC&3WQ93XEhfIwp7WObN-3y^QLY<^E-&(ic0uhyo+^@KvWO$NDPp6M4tfA|Rcah>_x zR-CW-C}{tlu5feOb1!{$C;MpP)kAo9ptF1$F3YEe0dV^$M4$c=wvjSigSJkn`ss%Y zc4>2?dl^fvLap9Xr9^$Kdw@k-q0Ro{;Wc|w>wuz2p;w)&SLIwuXIw2zB9Pfi>#Jac zhjPdHQ~NWcMWPvqcERGut)u|mP{!q`Y7 z|4O`$eLrhc@;5&}gTXK73`WL}^Q6CA&!jv-(1m)1Dw(0`@L?r4?WDZk{Cc^_R( z#p%rsy=B18jS*BT^D?^yr-Lppj-s=JI7<8ZF1l@e`ntSSpCF_P=HsTe=@o4s0444%gX|`XK8mi$ZH*Fb61OXXAgl-cXAw z_IEJO#}yz*E3-v~p+r-&OsB)Rjv(I%K&<*~{%)L40wFF&R(L%ASzmR5;%iM`%I?`d zgK`T_reK&ls2Xm03lXc=B$t5Em=9ahX#^3~d_=Iu!pAo`c;mD@V9NwH~pOYz-`4!}kYh`yRJj+qc8Kb-6h_q+hc z+7rV7g0Kx{*au|n7*zVU&Qs-9OpPr`6h7jWf*f0 z^Jx*A-zYzAf0WHH32u*B{hmQ!e9eiD+;E>lvmCC&6A&6>J%=nUo98!x>f_J6-e(!H zGQAX+vgqCgZOYngbje7a$?Kkbot$BRY$X`erbm<-H!-@!vo}MzZsffzNWNUbk`Og$ zJ-N81&@dDo)G*N=Y${s$TBaql{Q~QgJjD38&7w67f0APhdawj4z+$&u-M{Kz(MY6f z&LlZmF`wDDWf8IY<0;XBpY9}ErW9uzZ#E|cDV~D+y|6x3zEc|_l5^aqJIRu{H!r!0 z`_ASI*W&v8vc61mfMk>1;$ReT&08h`@^Idz|A#PlLpEnMp6}d|DDN3zyavV|6)0nK z`|>NOgL?ZcDS-gvy-`qQn1AjLc0GFvHe4l_@J3V|L4b7xT6RvvaL4pVnC%r`(w#QG z0UUWc@pa1UAh{(vsCOEvv-o_4kn%b58wDh% z=8xkqmz`IC>X*&!{nWQlWwoFP7C2J<;oeAL2I<8OsccphxfSN!kSJPbusTyrfRNTP ze*}`fi7u&JwVAkuXaJ0U%9=h_B{iqulCp6?f_xnL)47muHLc!Eg-xABDwK3Kw$R!? zFZ;d=%i@CD{Qo7DEP_3d4Pe&bNfiTxB79Ko4Gu`wvB80BBeyK*{8UnE>UGe%sxP`- zz`LZ0Bzr?a)GKm4!UeCtasH6mh@RGwf4q|{kKXm~yRR&Ns&Bx?&y~KP0w5`izm5?u zAvUkHq-yk4i0`|-%7TT!AB|pq{0@K|W0?o12pH_~Vui(}R|x1D;4*p`0!AGRj5(LZ?j1mYZKq2Z40AZw$jfPRj9smNY7EBSt z834rX|97e57=!7*Ngdl@Y=`nE&img}$0Z7kol%Mj)OS!`kwx|_l=!d(3$Wic zfB2nIDjoaXNEn+FatHF~cN|x7f2*FGa+8J=Ij(xI8ZOFL&YO1yVjm_?;V#6NgL)wI zY3U`#GcEpIs<;85VJJ)-7itm!5-bT&PO9C2ZJ3-&>cSVb)Dzj3vK7ZABe{wfW*t4O z(#|az7uf*D%R$B9gI{@8Xb*f@`fPyF7#*q@2M9V!wzi|N`zE+vQ^Y$Yf7PJeU?al3 z)tcB<8el51&ZtvbvCoXNYk-fBHpAacB}EoBk|;Cwj4WPvN)bU^P{PtO(hpG-vOIh< zyL%%6*!&Q+xM5^&17jr_sAP?*wX1xm^&7(IU8*vIZo6wrGBUb+GO)S^<8k6skW2|< zmiOg}kVb_A6^t3PXajMie};=~63E{Kahe3w8hGIk%gaY(;Adrx4m_i+47@9==aB}x z;Y0)bAFlQ`5XJ6D49`;dB#gTGP<0NTF?QBW8vyO$x3WeRNL4WT%Mkse3l_TfTuqOH;fn~X)^1$AX{4V3uzF1R`$P_Y~ z4AhM=mh&H&O87jC3>evSp8n1NgPC>R+~AP&dB!CSq8DFA$o z6B+G|PTdalDUcXdEBkv`rxAWO5HxK2jA&H&fgr|=AXXS*f4v77^Y@`0GCKCtetgCr ze{r*h%^1=3$oxhwF^n4{@qB|fDtiwCn~}9PLR;i-O&hq|z-7#4Xrrf}plA5COzTv( zJp3$=7@37?H%@F6AjV}Ts@y>I7`SwHj6_zeP(}$OJP^eIB}04YVmz=`12v3K_mEtd1`48kTna%%-F=b;lMp*+@X#kio z(hWpo`&9!98VqDV_R|I$6)vIL0FfP2t}z@#21)HK)!1AK1|j2h@pY)zXy8v1?T^%M z!s=_Jgc+fRMv9nGWFs}aWz_DtoUv(mT;6pp9I4@qe^I;Raz>?PBSl3nWtp3Igqc3jw`3K_o{3~&s6<@?L_yddkx0Exho9P$lJW$Ry zie&jk%dQ#8TzO#ZMv9nmY9lpVF=lq8gc)1sj1+OuwQ!_{&$f-!aXG8sbfkzG<=hR+ zZ?=t$e{ngh-+EYXGb;B(j@un&{AZfVP#I|Kk=8B-MqL&MTGx>~YIiScv;Ip#tpC!7 zhUo)~7W9Rrq0u z0Hauy*?ztLO4g#^7+OpLelXVbc`8e>GGri?7296(=idZBtZW0)3O{~GW5b4Ipyd(t ze-qn528f8ZbdX7+T@snYc~h?0x!L`YyRMen|I5TFqe5e-BjajC$n{0X#Cz#%bJF8U?DK+9ioKfnf0i$I&gZ6X6H34RDq+;wzae>Q(i zi?ylwr?kG@(RKH-Sb2uy<;v>jzbem*!rFN?%0PswpE?|mzpwp1)2Jf5@>e?GfAWmQki)-8IF~76*3OV3hoy zxaVM?VWThJosF{@;U3x#Ftf@T4a)ywIomcs5n2L)eGl2b4j50&5B;VeIIN%6*f{!Y z`}Q8L?SU1yXvoGj=tKE3E>`_ezQgAae>;92Dc|w) z7v(#U!s&AJ3Cw~g87RA!&m z@pI610Dt~ovd^EtkLeBec{`>bKac6h&tv-W^GGFs=wUKa$s;{{8>!?EQ9M$~pLESw z{*fMT#`1sY;j#Q5qF5Zue?O*sj|B0rL@@YbhzIsJ3%Ue!qIMzA`aDfl|)3Gx?34E{_JBOsR}Rs0&} 
z51T&_#q}^B=-a3sf0<$U%@1!;&{`u^EXqJO68n=@t@llkja)G)*+Gl2QB?6SZ1KPn z?-5YSy9n}$+JL}iV?y6{)GbJXkN!R+sm$n4wjPYlVN!CK^8NabfJ)@@=eq$EB8xxY z9pDp1`}vN6Ns#mNJp!1ZU)G=gVL2~viOT5r3>yhfzvXSx}<~tc7 zT(eiAI`+JR1HhaSx?cpW^Y0V2*8kOZG2nmQ?i!3V@E09x00`N7Q%0>a3K_%sKhnVj z1_KpK;gm&RN!fn+g^cyTY0{BEbqFJUhd3yU|ALV1{)QO-2O`-3HvWtJsOd<>j@0RU zEaynNIv6ZJH-s$DDB2XmDa-esGE%nwCSxtYZaoYXtT_CR5x)Pu{XOhQpOs_G*X2|H zA*=r#9_iY2s5i>~munUgwL!l_xqhqf6p#7B82X>z@j$t<{c`=#-IRT|88xgAWO_H$ z<3J6w{^*m>U)!H7v2tqw(f=9R_&bV{!Vx;YFZp-3VHXj(8Gjym+elIrkjb*I#`}VW zEJP3j6m8mTEfZUcaH?7Q)Kpy{ zq97KdwYPk8Zy>2rELz0SSO_~gxUB*hN!%OJK;nHs&12x>00KiEdPgiPKlRD#-{9S< zkv}0!6Vy7t^M5&CVNR(de~~RLB%y~5-en;US*Xe46BhD^(2>V|a#SFuszO%Bqi<+K zy%g%z^ob5ALNL7EF=a#rg0L{>8zQX{NOgc6?Nrnd5ahZrJ6R!!RJ&o+1k|?a8jwWO z39GxOrS`%^@|HlkY!e-@7qxi{Qmp8~%G}`PF^&8 z3TBEkUw_lq&8OFe9UiMy5h?N}xL35@DQE_hs(!f(Z>D0$Oj}C_ugCzHoHRo82xPL3 z9c1*812EFf;%cxLuo;6%BWQ11vzy!n$UqDKi=;JF_&umz^41 z*!)e|^~HvmCCBmd5`*gx1CoBikFxgTMBkxfhksC}*>`AmqsM1gNd@=)cAUOmDrm|j z)%E#R#)EbD6!q$ask&PSqHyKX7|>INu9-Z?1j=%>li3T0)Hnc&D;HA;aCZKwk2HtD z*pKeH)&ktnFCK^UN0IU0A-%uYk}5ibGULa7gyl060IoEd>JlKS0||xP0VB{x4tQQ= z1Ak;b1T6gI)R}ng)d7yN1Cb?@%vd!kk{O}GX2N@H>nzMCChn1qLfSz*iz0X;`W!C@4gK+n2_-GqM(d5&gn#K@jTbZ%re?82=|Ad8B^K=4m zKCE-~lqO?)eMF_M3m=V#kibA#J{H8V5#asBy1Fi)kt)=_O9hexVpdn-#FIm$bPo25fU!f8&*u z9N%$RIILE@HIv2v;>n-yf>0o{P?0cBP`ou{XukkhN70MIleQwiigA5dbe-s1}0uk_j zL8#HJ5{fu`RZ}z?8M(N_i{C?k9Lf!4!;SF?$GQo=5q%iWln!l3|6Nu*nD)a@Uc6EF zI#l{gG&<>rY^*MlC=8S|WcfyRR`ufk{10Wx&=;3EbJd3~G=(mfwIALxsA7#CKJ?@- z^6{by_0M|r_E}HA=)kL;zw8X#F;T?KzbQnv|L~W{m(z?(5n|sr-|hT21ln&Va+v3`HEG@u z?0Tp!zUDWtc^v;_ENnlV=7->-`HR;IN|_JUi!Zu}Rr%W&e`t}Ux2t_+X5H+o5B*E3 z68iV=snPBAKJ@2({%WJE|H_EKJHKc>uD-_qMX>1xN7I}WFp=!cjU2QeR*$~w$nO7+s-uH$!NLEj6S=t5V^4Nlb6at+s=&fEB|5#Y1f8> zXm(`RzV@rHZoIq1Ukda?U0NTe()sY08|_3_f8dmWiGCPMh7H(y7%tkani={fUt=QV z|5|pg{>yNY_Uk<^g7A7h=r8_Ly0+iBxVwKsxcxUof8PH2?GL#d?VB6T5y$mg=U4v? 
zH2TU{gg~e_zBX8y4>zv9#uRkz*V3+;a@os>wl7ser|ni@zwK&YdoSCYWfBEa!B$OYvH55NYuU3E9#!b`cw@T9Q z>-$Upe1B!-zwc*s6M1FxIhOy2Jf`?1@Qy$F;Leq?aP=!;qIY%T1=Fh)%tJy|tMyXm4)ybS$3L#~7g@+|%=O*z zo6DTbS^Toawjf0FW?!&RI&XH*IAd(Lf1l%~`Z+d3zc@!IF8{yg^W+c5YWFe}U+ijg zv5OyWFumx@Z#|-mnY?)8{+}|A-+GX=_#ukj_2M9_FB7T!vp&NsAEMLlzC@?qMWXYY zfBsv2gm#&^?Jrie{u~blrT$G4HWqjF8|aTR?8aW--R$2PKrN+#zj#UYOMuqFe|KM{ zG#v=#Ds$U)uKD3Uf9G9o>xS1c>!+-!T%4!h{}+~C|JAp;e?88R-&|_*`JUx&f3J6| z|3!I<>~&?d|L3gpUwzQN2;={NmbBgNKjSLvfA@Y@{VonQ5N9UmLP1V_p^Id0kN@D| zI8CF8inEu|r50Piqe}9mA{Ke$a zq8zR zuHP#Bbq>drpS1r;SDGd>zh19D>F?{!w|se}+i&?#y8D*@r2B9APx|>~IDfI0UxxD+ zYuSDo&M$xY`ZAoq%6+&07i+nFxBtspF5m6{#b186|5v%&m$&?bf1k|QAL8=UclIxd z`9n&|-!b|nF+Z$icKi^RpYqU$XZ&6M*O>m2n4j`_{pBtH`0hY|6PF)`viu<~KeXh- z{oyb#T9P4t6PKT|^AB-uHgg@bV!K{9Rtz`#xI&)%b?&Iy*R?86_Pctg zZP5%Tq4^0nn8ZpUj{DTzD(Tb`lYB}>|l9aPu8zBxt7-K z-@M@VKjshV|D(^{Y0m!hbFp9Sz5b*npH43h zQIh}Q2pRD=d-vh>F2)l3@}AuA-|&R`Kj$m|9lyEJf6G6sPf>!Y`wD`)=o#3*71J zO2{vUFTPkt$-nCTz8-2!>>rMh9xiV2%M6y+dgsMM{@?BI&wk`ToZpaGKDUm;Cqr<1=2^ z^2-10-}4{%OyR?V_8;DH`}UvD`~L8l;&!dq{MtX~r+;4SNuTBR%Tr#>$v)}(c*Sh`#-}z7KebupSA{lCLz{+_?G z0Pgqv+&}+|e$5A7Na!`>^lM(xDrxYVgu0hZd4-QdXfCF0ondIUqivsIN?xL`xo?|S zfB$pFQr~_UN&TepznDq>cfMWzW+t&;`KaTy|Kg6l{}VnE`(NfGnQLD5`^dbiZZyqk zum6aT^#0%@U$ot!{+Ex$1f4}nnqg_7wrQz62EBB))Tqs~<>DzAGS^9&oiWqK$^x*< zSPw|nO7aqoW8}jIypd04K1LWg2@D(vf5B(ilwTF)?`>@}%^rEZ4PL)LYDa>3(C5C&4_eFN-t?&;NCX68^$}ZH=h$jSs{Dy(ooN+kj6Aq^3@hwnWJ#dbp8k?1 zuzjGA_iF|p%w8gIPgkXko8u7^ zW+~rNYjBXvqdg6B03Mu46x{Slf0jftsH$7Qy5(+xXY42WEXI^AZGS#RS&M)w38;XOlj;&z~#vK*mLY8GHWmG_3*93XI&f8CCv?uHXB z>Y&Sm8hXQM2I$}L8-9l}DoJ!v8WRq?S*79m4K4Qbn6mX96+FKc`W6TBtgM*KqYsiZ zAHEI%2EU&-3mO}SVVj5RKU9FGr6qzpFRTosVpB>e?Iq+5S4{(xW4d4wD=uim^YD{g zCB~Q8gnY#5UXZWz%nWfFf3}>VFYUq&2rkC&g+94N=a028RcSHO7qNJ`9kR+4!|h^h zS}pV|dN0ap+{_y%L1^*z`<$R^3jAkL&_1FE1=neB$*QZQ&S2(yAP06K1t7bkyY@i1$~1yRA{wk`6gNG{YKkM(feM z@@8KkZ_dQQ5r2Oue`*CD`q@8*0D0Pv8i}GxuhN)ZN|{yUu}9dX%l#3IBTQG%E}W5P zZQ1)W@Ld8u=4_yZ$6bHDmdzfQNLz^_s>jTmh4?k3Eu6_sytz7G9So0}HrlB3h`7}r z;4NYYA;nrSV`+-4j;}3AP=}th^^F(@+|OKadiC3Rzh)y1f5*OOMxSaZ`t-O&( z_0rN&T}Grl=^+_76MBtgk84(hEo_F{(;T+K(3MioYwKd8$AO>H9RhtgG5dg1ZWw$6 zNCWHm0rl5hhLTaV@ddd{rTTK^F-IqXOcXynkf*)x8m^?a5HT8(?Jh*@iT9}0Q}7Pd zd+>GXN7tROe@JKbXHpX??C?i0xvu!`ZSje+`M4{tR6m*<-0d&jCi-}OI&v`UiQ+Gs zr#VR_AQ2)Hvu-MCzreB}qFyw$sxE1khguh88#G3CpN+wo4PMW7ThnxZ-*-IZi~SUI zmNaQmzxa7F=Ky7B|3mfIl@9jA~J)2k!RJ1YD0(g+SntUyBZzqjo3*hBpUN1-UWena=*pj ze-N9=tD^>q=oePXq?^_%_k}0k#lW=3(#EAyLDBQ*$^ z7yD=qD1N2>_I?b7(ovn@pQp0m#ik`Ml2ivdtajuWCyf`+JfBj{?+M#50jTtGUg{Mm zgr;0fY86{gM}|;J8-*SbGa5mIz#d|Ee^ji|H=G&YeG<7_g$UM^_;wDyvVoK0Uwvm1 z3Hv}}e}!syIunAk7qm{Gl({uN-qUXigo%n;ZeIE}PfJ(lWB>(h9N3!?O%SnMV ztgMA8HISOrheNkVr6i+Z?c3Iuk4Ru4dXF~!>2 zq!T9yp#!Nk;*Z-Fk!T-9VDlD%p!HD2^YH-n%rc8$<2iw+Ud$BFn{}OXjH5kCm&s@; z({D6LSGvNg()NId2UOCJceRJt{lKWKq`lBXLV(np4NSv??raeb-YMNEfBV3aT+=%J z_U!5=j7G~d<*@RMy&6c*k7Q3#dNOxUGSRKYaY&J?z_)lpJZxV#2g;PPC`z|! zNgX{LCRJMDv_UjP3#ZQef3bE|bn;5yN2hYoTioh9=g5q+OJZqH4gKbs#(0Dlc$QY+ zH49yEVP7Bzwz{4z=}2W-evhIey*$p+z2W%WN}Y0KdJtxt>@jhBXGRyD!?roA6~5^f z#{)Nb&6~tcn87?ihRn%3SGL*MfF~7T^LA2hCS!Ap2p+A>1Q(3`St_RaY?b1k z-Fhq2N0I4A=0&OZ@@d=mw11Q`lfw{&u4V}V=G$7|pzr?XhbiF@>P@6IHV z+8ixerjllY&+WvQ53LbEab+D_zA+?o*0gZfkD6 zl5DN_&;8aOw2kzP@!BAsE}pf>GpQ#a47^#=*`yL1vo~Y#_#mSZg3U*Qz16c=vA{6j z5c}@Z{++{fSZ@y3dy^Q2VYYI5XXJNOyOY~4B<-zNhzfY=x;1F*q)kRS&~OTQXS?I> zX?iwyTa7Vw7NYEMaDV8=_aO|9bCk7bRxTmIlpiZ-rv3{W#JL=W#x z@2NfpOtmxu?SyQexzn4I9(X_Wf>a^x9uI$3c2a?kBJXlENv~`u@Mxiq#lR4FwR=1nWX9cq$cSI7O1Jk4JEd9IA>G_rD~(iQovyE zAjH|05$k)`a?Q)9 z*5$ckX$zthn#~F{Q^(eRJ){VD2YZo|ppwd%mwz&tU7siozuf}_;}bRRN;W_4M;+|? 
zq{anq0OKh+>q%ZOvw%%#Vq`^$VJ*pd8~9o29Jf0y{ zygVapNK`61PP>Ngj#3eue`77cz1s$I#I``)wVJYCVB>YOq$fT$aC)dh57~;3Sb;_z zvBDZW+`1+1{Q$=l{jR4m}>du8z`hu}Ws3hO4? zEbfnZALX$*?2Yn71t$hbW7!^6Wu&)VDd6z2rQNzxlqky?-Zo9BhbBmggSHc5>VKpY z)gYuBi?^2ugk<%Ea$S7jwN+F}T}d@z9%m zeWET;Y!F6nD|FtPX`QFY>q5RrkI~_p)q!)um^sMCGUS1Yo+U(nKHL`;eSqN|Uch_s z?(2nkYYo6|RFUX_vkXhj0z{d$C_Bg*jK_%0UTq=s zkPvAP2ss3#dApHBo9WR&21Q}wSW%(PY86i;-y2gPU1~46NgF*15OHACB;<)%4-&n- zPX_W(2JE}&-aXJfGof?tCMHN*k|iig1ezS{&BlM@kQzE7{D;FuuVR|bA_^!phBE7t+44X6PHYVsh8 z+hJw5j5^HH+h6xH*ik(OHd7Re$GeiVQ_p2KfMZl+|jK$}t_>wiubQ8CnMy=&s^ zt&J%+QQc%6qcI_+{dU=-6etX~bu1pMo6+!*o2|N;GQ%brcN$Z$?Ftkr9XxZa z(xHKST9qRq@iTJ0(tnRBf%N6Q4J=A5_7&9*$!;CZDnbMfh199BFwjJ~e?HYllIAJ% z6^{wk2qXBX=YLvh#*_KoYaK;80C_W14kpfz~44q6DNnQ<_4nt!?D44V<;xl6Iv0GH4!juPt( zQK&BBwl?v$E^xnQ1;6$LzwUzJ{d|G1seEa4Z!@Zxp!=P=ja`SN7qFgYwTH%NKRkBj z%~X?A7ZKXz{96bgR>K>GVdJgcFAdP?v?tAa#`xij!L2cdPjSv(c@rmY^oXVAHb(NY zy0P&rbblG3Qg`DsHx|f;bZ_GmcnXn@18senXo_!f%z#c_u)|bF|Ih!NLe8__^E#Ig z{GUR-?&i#K+ArIFBeir&# z1-qbrC}fTLmlX1W{Xrp1da0&A6f(ztUi{SWRDaB#7pfl$S!?JY)tDKgOz6=5heBSq zKPlvG!>b<(S+MLsDdc-G#Xl5s*nJ|$UR4oacA>Y}4~5J)^FN`Gsh|vP@qBtgnzzS8 zkCDw3?=F~lOM;{6N@tm+L|%>GO~z`P4)S!7z&QTYfBs>yx8mdtnvZ^dKPwElP1m*d zH-Ab}Ko@8X-#>UuX7{q~%8_M*lsOo8O)Uff3x;gq7(6pJV{ZK-@dVkCI8Urz)X!s= zxfmWPqdnoiT=9;X4ZTm~ox18FTAKH4Mg?>l#QT2G1}UKukCqg)NW8gl#m%jNj=LG} z#>O}Dc|NTbcmx)*0fRuc@AoY~6?J{-PJf7mzSF8%^b&3FNZg&a*SR^;BaGGEpW@j? zyy%5>r0sEfNoHh38kvi+B_~bQg4{I3Rd# z_9$P=WPlo)@VrK4!9Jfn$VUyT*RfKE{v>2-WgubQbhPAU)i{m_&CK!>RAzcFWq%=h zM_97Fj-29cUUrdCbPu~~lO3|eIlwAb#O52ApWaAeR0bB3oGwzWzJTKx<~BNB13zkQ zU8769T=(8bEFvdkl;z76lo9o$i0FM*G;3rl9D#l{Jo~gE`+ftTouP(6=Wg~$LwB@` zQSx;de&YZG#m4lhsQHPayk z^d$%sMSINr2|kBPJ2jWe^c;w6x3$CKj=ALs<=l3Uq0;f0YMF79g8J4BD=H_t1*bWJ zGNgt9lUEYIRvL3e${}jo&#kFM9(p*SG(>Rv4Hbb&)WVae_5R=)c)m9T{C{3dMmLT7 zuuyjfLzXEI@whPpL))-?NLvl{?+LcFUe-WKgg@gNMA0>!&68vLJX^5gRctoDuX2Bz z@VOJi;daA-LVEe5#XZD3f)_MWJA#YnkFj$!a3;#WC&i`5<93aF+E$&yPfD8i%3QaB z*1jIx!pO;!31RYaJi2F7^M9g^CVZ&ieGG1mFd_pTEiqOW`|e}5%iY&p*8pZlZAqjYV(w6s*$MAUXT*L6aCf4 z?CC_@yh4QwzPGbx8h6=Ou8x{6HTtDG)e1fy8 zXK0Fl3EW$8gPc2$bR4efvm@>*bidJrMzmcqbvA4BbZ&mz>jFYk#H z4E|`yn0XOB)H-*$h=2I89x!PNS7@yR#!kU!fg+hpuWxHN`4(h~gTznF01^;bbtXoA#@VoMpovZ>upiW>(k=n9@#< z4ERaHkuoIu9`~nNW~ZDSj1_4_%~!z;RAMU|oJw2gYL4s;kbj8g9wbn8AMyDqZ}*!w zJ`kDSPm~8K>zRebV>BwF4T>IA!%~cNN}F78V!&;!WR5Q_Mm^Y`gG1LuLG6a#G36ms z@M-qxz}()ZQ?dt~Swo`5KIyBaG2)n~a=I@?>-(r*Y?JtjXGKN{P@v@c@@yicN7Dv} zF->L6?448Pm4AE&xz%-qn{qZouOLSPq%B&DE7udVYr&xaIPJ?3uXp%jD)llUu>9(s zuuBD!kV8_y1a%{Biug>TosrX&0QoD>D?R9G77{!f+w^la9y!Jd50&W)WDcT7TOHo= zLMTJ+@3D(Oq8vX~2}xshf(uXATwygG+9#WiG&rLiH-DCf(v#5cxkx2m!21!-Z~?{| zR73-i5F*bc0`jcGDC$(kpV4Iwk)D76UTyvaWxG*9!wkKaL<&3*{+_<8ZgsN9MknC9-^mAm0+TJ%tJ@RR9-RP!1`Vu8;y1wVdEX0vF$S)r(_DtzM%`9X7$9gdkW6kW!I70w;R>HZQ?6qzK^gDGzqz40!)@77^P zLP~0)P#?qk^a3Bm#2I{b*gj$#s|(#?(0?*pA|c>l28%bAp$HHfTZK&GWpRj1ARuzwaG z2aE}vM9=M(ytN`|;!ueJp$uMe3K^9ch!#kLC&UJcH@hRit54rmLEUF+kihkJR1ZQD zeZo2Ulz2pe$+JCdC! 
z*MBc>C&u!aTea*^W(TaEg*2(i(~laSwq;Vx(I|UP-Jms9du9f#iYLgONPi!d#ZgX= z*F33b|9T+_5tP1#7Xt%L5DWI?$ftxujGj80D$Y{f6}!kAj~Zhi@i`OQ2DyTW&226m zXi!F{ISzp=!6WWZ1Xd5$ti0B^-*GoEHJZbE4Z;H_y~WV`#KPLsViuUNty*x>LmYQ$7}vTSPjCd%L8n z&S5ptf2i-SC=0jPh+(k@fRELE3_-Em4_7#m@@DOo+gLVv*`aXH|av6Rbw znzGE3g_xfmihL}sVT5ult9in2?7Rg}KMvnKQSVcpNbhKkhXu0P#s`ZzJj`a}F?W`L zi36&WtVHW*S#{1lz!hcmF~Pa=xG|3f_Mvyys`J8Q4AQwgT3AUg7QVo8`x&6KZKqB% zsg7z;Faz;N-_;r}C4VSjj`SOpU#+aTPU_A%m+5G>K3tGaSDS+Mzo>hT=~*!@qmpJ- zqt)bhh1sl43+lk=+uz|D=eproxzV%D-D$IBqEag8{Khi0QqY6VqMPp1htCK&wiWMR_i0|g_qqK(zV6%YM%lD=fiwT?4^SjhD)W_DqF>g^&keN?O*-GQs9i!h z7QU@vyN!#&Kz|)Po}#l+ik==Wy)x8XlZii#iQi=&NV$~8Yx*f?cpm0)bz}6mFe$dX zuIQU>d_NgYKc58TzT%@rLc)}zNIJYrani5r2;WcuZ6vZJ)vcx+4b&|wc<=zD8%EbJ zosMWVte8dqwSToRQcil)CwenHVUZt9nedX`=s?6VZGS8(G+jbEXWj(SAZ}H`b5OT? zmoz3Ygs8Lc+k&uiG`+=JTVLyfP1UbXBr?3s?b`;xGlBb3=z8FautcFyqiW!uy~f_Ai5`e0OCyvws!3g;jYaeI_CAh>P1E%=Eo+#Oea zMWs4moqw2dK?-A=MQ_*z<6t0&g54FkAh}WiNIpU74kn;`rvMUN;T?UgHy^z-E~JPg zL#oB+(X_{Hec4y2hu0h|Ie|c%DJ3sFzkKY!)Z>oiZh@# zpoG<*sp&L(%ELIWts)}njf#NvXLox$dRSg}(M8DKFXD>jgBn045;h)}DA7EF95=w_ z34i4p+vo_~1lC$`{SxQQZY8A!rrWIf_w()TLxY4B5n2Yw791J@@nW5QI@1WgwBgHm zOQIQ|21E1D-g5$@C0-_K=t1&!!v~(I7w{;LEnBe28=|%8});NLQ>k$&Ja)WdOr zEt4aRZnTzEoT(F}l)sD-L=sVXgA%vU8|e$YJRrX_B@&X_0!V@Nk&&&=2@ zB=(pL>J9W1tS7;%)rf+q+le0y5^Zmt()ekM4R)@pPKNrZ2#9O;Uf98P={7`3mSTj5 zfe{RZkyfZ^?4zI08P??@Pz;-|lRyHzMSY~X-)j6JsqMRQRUsm(UTo21cz?A~oh?Ao zjM8U1e#C}~=hMM1CJKjk+8gRUgArd;d;1(-o|^&&m=-z%1%A^A_jGgSmmV|^MZ;sX zQ?M3#>`o`5cw@?NzdI4*5`g zwiP%B=qqY0T$yfnCDETvomr9&J)M^{0ja{)g)%uAakupCZb7$Kp?^{me9OqEqnmwL zlVME__w9a5Ra9OdVeE^KS;I;*=_uE-+x+Av+;5yk<7Pa^v6q;R5@~6H44ac(vXOeE zMHe_58V81mF*jR6DtS4Lr)c=9o*w0ky+Vf(f;qw7f;iF2)h(Hsm~4w-)QsmXwNpdW z&YAV{XOowQ4){xmf`8d2s*!}P9uvG70-I<%gLh6fT4k=fw-*!pz(TP_1;;eZEV92; z9qH|v(EB#Ojn0Q?NT_rj!dcYwcOjUWugUtrSS_+KC5_O*FlzY;bTzov)=+=w{k9Q}Z zVVVKyW(f=x8-EOk_FRE@eu?hs-|Y$4Tq_4vv+p@Us6qf7s z`XdBx0e5=98+Z_|Jm+jF4~01^Yw7kUT~3<;=OS5%cYkL=@6eetug()ZjZ8_*qc*AY z1zR2heV-V5<14={1yLwDG0|op!5*YeRPr`b5*N_;8QM|VNnTs*L zb~=l?y_2W$5L7NJRQ)q#1UY1bv0Hbiv|K}EbEZGG_rk77=PF6`hWj)QqUBZbq@%^u ztI+|}>wj$$5{>MsfSm12U604&>D-^Xo#xI9!_8qj`O|$u_`D@`yqwRpt-{A3$oQ?H zQN(nC2Jc+#$-Rh*ISadlmqo;_4)vr1WN-p3X{w-X90O?O@uqd_-Z)F$#b`Y@Ck2!^ z0NS?=$n^~a20Jex*?PYOX1tRL-o7UGeNvzax_?jC0k~J5BzK`DC^9EZ%Od9&Nzv{- zp?v{EdTT>xVUz2smJg%31Gc!2P{uKve%O90=5=l~@|N(zpj8f`Y&u7unVTFLr*9^A zKvA#{Bwf>xDLH&u43p-RkX>w4YnmcW2W%8Cy69m1U2Xd85&`5}@*KatD%z0vCzGp3xn%y)J0{I)Al4=loyb@Cv2bmcc9KdEe0ij05rgp;zb? zz@R3C3N_%TnVRavvf3ez?wy-fds(YBK!XV!7(E^M8yz%Q)ne3BWcpAClOWpQ_n`z? 
z&S>x6ZCdkoqu^6A2;>RM_u`I9iG%U*vwyPcGxF9T8vG=&_B}Z6MnE*@^76=T%!Pu( zHeGmhX}X&_Xx;_pX&5*AT#6>VITozpT6sgdm>NiOdFii|=*Df(ZlQQSZ}$`H@1YVG zRr18>;s$klWh;j%d7$!wrU(225%!TEhj@Y>B7hCv5Tj6pEWx5`kWm0#xI_qnrvkoH&R)bs-kd?u zElV+H8qVAmKUnenc@)un5>o$`(#*w1pv|Rm8^rgBQhJ)<2f0ER5Kz7#qw_oQN_8ER z43E>3Y>i!do)oZ)jQ6HGhe`^{M1PPKRV_3cRO?BBTV$+M54{@W-qRv@tlo^~6SSupG-_dd>tTQZ4-3Ngn4#2uP8SU6PbRR%57r5# zE54F*h-lc;iRm>~d2f_?_RVJoEx&jHzCTLC9U!Ej8}jBTxBUE`Q-uTPlR4 z){V@}MxGt>d5#B}nNM|bR+LReff4*aLM41=ysys>{|J@P5|Mo^ z^U<|Fr+-2vgc1{9ag8q-kf!NG{LXk^Gu*#}`Th=-kONCe({fw>6@My$mrljxX_`&1 zR$*SxzlBPup8xYu2@9~_SgK4zn)y`i3YGBTy=jKGKVcHC5Czr6)axwo&>ml9ELBD} zU-F^?;s%}SF~7+UZ%5`T3TK9h7;wxYQ}UcvN()Uw+`37tROt*;MjrkqC2*K1q(CEZ zn1CIKuDwtJJI1P?dVgH)q(l1oWw>dEW|%T-m|CwtY%-T2{4P5@{UP=%ek%>yj-GUC z0iOF|;04;2eLku3N_O%W!;7cgGW0HCc$h%wyU^nq zqO=MKD&W=sPtZ|SYt6alxBhSMT?aMLLadxqy7V4xg9C@RvNkB&fo z$B#2QAF+YRVuJw5{FWxm1DPb>F+PP0H2{#KIDUmZBLJ9&ruFJq6Q@7uhThQ2Pnen2 z#G#4Ahs)B~^nZ6rX&_E`Nt-e&jhchTtwH0e^L;nmbB(kdT8Agpb4&4ho;`II5x5TzX)2#ut%S`y4c@%-$XNs5n`(f*>UA zkH@4QTuHf#%C}q>^5FNme;9~J&W#l0arvbVB$yj`S(1KX5YnGI`NJJJf8_BGLvce6 zXw;HyCm4*ijvyY?78Rsz6D~rysS3zb2yz9&+<#Q!oFOy>561WydS_h(oC|Y9GUoG% z9}(n(G*I9E%kYdw3{sUW+n>~&4Dy6RAl1ZTotF+y1bg?U?vo}5mq5pg2yiSSvg|xo z9@ii}yRZF&Y7|2%oIdd48=uFHfKw#9Q&r%aJo1S6=+Q}~(u-5xD@E5`#+s~4A@HG< z>3<`Z0FWszS0UUmf;TShmMkIu$(MIr8&izLDV1U2leQuZ3{bIZEtU||-No9U$+RWL#b(+G}nK+cg86^RmK$b(z}qkpxA z01E>q>A^e?kN|Wpj{ z!jpwMfUniW2oxaw`N9LaXi~P{y5tjjXBq4Gl(aoreWzQoP+V|s%t>kmh-sN09@oFD~n*5u(RIP{86qJ+`boKDTb>x}R@8B32&7iF~86&<}6FK(66HEDkRIj1T;V z`TPBP1{vp|;MY*0hKy%3x~`Y;OQGV0Dk~qVWLjtv9^{6!(0?#Lb%WG{Nr7f=a~A&s>P0cmS0;wRq(*jE>RW}KKFqbC?K1b=b0>RQs@po)q{ z!5v-mGe1Lw`&h<75vDF8?ZLsHI{)FV1XGBi22`Yt3IYYnCT-MUfjoe7nd+R20x*OM zVMdUl*q%$>szdq076gU_aIS(=%hR?GEy4JdH>3_JR%Mdk@>1UT{YgU#&=h;i*!}6> zZl;GDkbj62c|y97BYy>s;t-Vp0qpa59!US{$lAq`XCOd1)yXlb`gAa|Y&Cm^3v!a@ zW6m8gJOfqPzi|&qxu*@a5DqtGc@7l1ilmS^VQG-|eltBEo9~4~nF(1VI>-mL0Z}oS z0(mrvv_p(}2MmFLk;pF;l3`pl8tS=$0G1T$o{)70f&5x?pnpzLg%_&phlE-Bo)6LW zsb61W$TKUS1}Vc^Li(*!!s?Qz3UClvxLj7?%IT)j`Q-bjK2cV+QpZ{WI7R|L(ECwU zLF73Hw?r$3NP?=R>&mR$aEmu(z32fC0b~ zps1LC=av9?^8FZ!J~Z1h*IDQt5&Hh;yIDy zL-W(0{%byyYrmy${q(Qja)$+0fwt^(q01EBX?1~cSpYBTFqih-+C>z%;Ai_-^Ph5E zjwj^f@)*4L!|?r4j%55@j-{5r=g+3+zCeK-o;lh!P9eF50u}kbb>Y?&B!~w%#t<+H zgn#u7q*AW3CxiQ(w@&|Y-Y)vd|8V5rxvPBVli&V*Nk{bQ-_sOKzP!*|c`E%~J^S~0 zJ{f;N?DLo>Wsi8^@C+Ht1o7I!@h89PSi-fkgyTn!rBvb-RW!-`AG`8rOnN8#{JmPm zwF+`W;6bi{(cpRCwzBbk@83Ul?bY$eHGdV5j0kw)ChuJ%?ZC5pO^38n2h@#{6H}ifsFPnV*37Y45wMFp~D7e8vrAIgFS27o`mRF1H%8Y8n8@g8Y)7K>e~==A?LNLf%prM}PeW zaJ(>c0vr)lr*eQ}S@73pysZ*|TU7uRcO!F%+~5Pih~^aD372)-y;tjOA5!7KbGbeX zm?PJH#%c!qYb5i(762h#f98KFxt<8c!Vd_+hzdjg#UC>7&0$m&GNyu@@%?Pi`Y(F} z90QABG@AB&=Yl~>QT;tNHrgIBgWyi#7UhkQBKXlvX{5$4SJ9E+k0%2iy- zl@AIw8~_midQmfY#(avGs(_R-b5e6e?Qv;HvJmoz+A~>}&l@1ochpAOXX`^fqrHTAv z1O_v8kmHkiE6Hc${nHlGU}*z?T+gU;N}He+A$W>s z1{yqw9l~77?;^+qW{-dZZOEjcF4O`u70eaM;vb>}Xp)A6Tn{fKS2{wEtO^K#V7`A{ zt1hhHpXdDTI~wV4jsvh_2WxJW6p{{6UXJBaFdJXmtc)V z{B7%>bK}6CwN(zvblM1^_&vJbeu$4s-cFezlObWJ%OtPh@@nDj$xP3=Tj4 zex5u-ITXp#tj)+zz<*4^9*ZS7b8t)M+Ew&fGu8b z-Ty?dF?BXv;D#aBU;hogCP+YdfGGzG_Etd1d1pi>v>9LlLVu_Ef|~Of%rRM%m&BLj zjGEyFeV=qA^Sv_r(rXmJG@p@qU&BEGs}%WsGNKA>2B?f?4_re?Mv#%$Gd6nw8U8jR z3UmXsM~%>}{EZYrH$XTp_t}LDxy`e!!96#3ApX18{2lTDoJQjYlu=M~*gzieuN>!~ zXW;mvT9E40A%B;7YL%1M8*mh;!{--#()u-AfY{1$Ryx!wmrwcMQ*Z`ZPMYN9x-D6mlWLcvq64YdUkPb{|5@L>RoHAu9_TNM4xwhp%es(Zkw7Jc)Gyj;_t*pnm z1cUteZ>hLH{^3Mc#W9xRyu1c6Ars;g@ey+n zZ;UD?WY`IS@I;okH_@Mb7I5c{xxy1lY2d}H%u5R?H{?tJd?i(Mz+=Ls6b3{z zNy86=cALc<@n3?8a1esVf&j1cg|ZvqQNZ(0-1?+Qcp=;f^$_+-`(e&ZabeHMG3Vgu 
zUmr2~ti*J&qC>n;Zip~A{AhjnFD4A?{1*`yihuRMFPz-;vp+>AJQyM1KA#)O=SV(( z=myOfi=4dw<;1jwkQ0sz0cYO8DJDyie<=|GnS_sWLmdE@C7&g{-6Tl#bCf@X+{N8U z3<9wF5OcW*B=s1EPC=Gi*VcdVWI;{Hv5@2?VUwUc=7hL|d{7YH4ALeJbs!Vsgm7>R z)qiyQ5M=Pn0}jCG70I38M&$_yN8-Xdh$Z&R=q-P(5lB#sSs-d9;aUnoQQExh3HgJ* zB+AhVC5WYs=!QXFg|2v1bO`))NhpfbLiPRP?g(+04gi>@!`>K z(g4dEK_qHJLKP274 z*Z@q3nh@kp+*Ywmm_m?$$_$(afsh^`mskkG;2*d*_n-GU@DAAzSoZTPk8UHF{VMhy z^ktf?rTr7eXrv7QOp&BRnvEiK7n;f@lo+VP@hFK%KLM;S$0vl&;X}lUc|HKH!hhG> zh>)o*^^oUB#DD4!a>w5s*vu2=sroQ%q7d`EM#vK6n!GY^MpRBZVWyzupwM+kA@r*Y zq3Y&I_k%+B*WWN_C`dTke8NS-6hKu7f^tHIDhxm@a1R`9eOoxag^R-+q24C65OM&D zOMNh|zF7WHa7gtvtt@$sa4v{%3x9?1bMEigr2djJ{V;SN#*4fko(Q3LA?Gg>Qtq#H z$M_$ZDYV?3CuNIS0PLR34aHCWh5i6Ycpdnm4qSy+a_0mr(xwD=X+yjs@mmSzkgUv( zM#$q@Tk=(0cq*~@cM44;WWpKwB2_rG1)SE)Mi-meq2+#4M;A&k*!YAAx z28=&c9HI6;6kMb0Ncx16{C`b9RGd3c{D-2u>pD_D;mkmYqJ@OyxqrubYN@je;XeUiV%LzTUhG)H@+zTr}H3v-hV%x=O-M5Ka)X6 zSV$%sIQ~Lmn&p_=gw6YB%5emU*0|Noev zB6Jj)bHtT%BXprP^0g^_h}=&2q(db2S0eMpzo*cabX0`5BRr(W0+ICBWM1)i=_d2N z_%qL0m^&~BIbrU;gzCSfhxA1@|C{DhmhsTP{bs!kj?^35{eL5l4m8qt{~M+C7zE}w z(bj($J=On|)c~L$@=R{?2FWx2&#;B5JCQN#CvJQtB%XWvH`;mr)~)bAxAWiRpt1C` z_WZBu#`7-@k5HR=EG2(WqyDOc5RiWVzu?*6U*r8P`Nsd#{>V77kb-_PinJN~#RG>y zp4(#m2TNuQgMZNWe~Akp?qB2LVGxGCEJ1F6+6&2>|M7fCUDJCol0M$~mi?#w#ouy2 z7Xuh8T<(6TKSI6{W-Zr$5ozT3gdJ_sLeu`*$NyOycG!PtD+@i$YvsZwI>Lud41_pb z=8y<)&}n&1c<}k}+fTgI&Eqn@*u23=Gs+bdR}LXOXMfU_sG(Jb4?fZG2doXleY6rnYmGkHA@gHRJb&RqJ< z+OL*=x^1i2D}SZWE9ZrM91uu=lnl}+<)_P{@-}>hlu;5-xz-}aSOAChA$Df zux#mvWc{*o3(fZa#l$V~37>|3#Xq~>_};Jhn-3B9C*_v@FmVgdcHw`RIKs2R-7iv( z@N9DX!^C~!&Oc1t5|{f$$}Mr}4-xlAo5{)_CT=0_s9#Ln!n1)NQf}eda`%UcTPQdF z7k?AC#5I0Mxg~DFe~7r>`N)gEm^gBdO8mpbE%{Q+KcpPV6JGhn#4T~_A0}>zTb4hh z+!9yUbuuE6>&|k7#dC&05u6~e6mpFhTzQQJ%8~H0B4qxUJ3aupS7j7#s3QpC-EjcF z#=P-|iUa&#sJID$_Djjt%N?+PBjk3=eSdA_PAXU!xDo&GaFT-tH(w~ZC&)o5mfyI} zQDRZ^;pAF{JVF5xuf%KTH!BwqHtse7z}%leriv3l)d}$cxo;R^2_WYmn3|frCz?dSgF@)`mdgZ6hgPkvGH{2eY3!f5&(jsPD+;di(QkOGb0;nKf_o8T71^MCKx z77(zo$)T_iM*!}=lv4AdmOl44{OW#@UVIMD z`G@MV2xTP@V(tH|yg%i*oX7i1xIMqbShc)fs5OI}*RYUn-`9Hn*}n}wG~1a_T@p?5 zaiPDGg}%%GLbCn8uP1+(u7TuZu@HxUjsKej6Bt6)4di?K^HQFKTqE^g`G4lmQhu9X zMBArceMvuPEtDPp5N-clKS+B48VO&@S2q4a(D{VclVVaIw%_`*?DvP5J1zCX_>_;l z{H@9R=cRn`hj=5e7s}25(2uAF-jePm|F2&X@?Yt@zwGx@?*B+0e%k-P5O{x0AED$B zA@_<8#YX-r2SXPi0SIy- z24+ouZ;62BL-PDRz$xSbQUPuuKO%$tPD03$OQa`r@z8~ zk$E)wMdo4u_x|y({eO$X@9e@yqH&k#S7rc!yF(5gsKKEf(mA66t8QLN5cj@o^bCb<9~M7i$c)Xm%FMwat5gK zsL$2vH9MnHTbj8RnwJm!Le&gQxL1-fmFovA^K!Uk*Lb*r?6IpFEj(_LN&@j-3~rJL z#na{N?B1fBOzhfY*`rX#t3rQrhr+O!3)ncDYN!wIBJp}^dkN-eR-E5Y%?##Aoz7-U zzms(*C0!TfL4T+Wk^Vk%RgrGGLy=d{Qk&y!Cr@U@JPL+-4W}l1-4dFWlQr|y zxyw2)Z0xId<1rOO^rq}?Vx&?FP;=%MC>^1YIk$I#*VHh&jke<^#`9pyZEfnLucwJ$jkG`1|f zwy{!`j|=daOQu)b)!O5-e4ii9r?J)3BuS*p!;Cam*a*1+vT(40x-E^;m%AXgQqL&f zL*nhNGQ3`k{JFar{uZENKt~Q<8PGFA#_7jNzDeABYP3zt9;b67tj^cQR~vI`sMW~r zy?;$}@Ho3+YzoR-W25)?6Sq?>>cNGtjFL+Fa9)|!Eq)18HE*|>6}%g&kDZO?7}k;6 zTvj0%bXC$Xhq>Q_qX>NN^&a$j&uhl1n=tm|u~n5mttE7Z2x>S7Mo@qBUd z>=%KIwUPCbNb%WrZPxrX`N@zT_K)pZY5i|QpI8O&7+-+!^b z?i6*%#xg!d+8J}v8aBSf?;6hOYP(SJy7R=cEfd!}YC(9392(Dwf8FnnM;|;iDazpy>exX|zYcbq^Dd-xU>x!Nh^8u6Te0Y}lHVbTH(p*b zUZWPNnm6rVa4_~lWqZ6KZ3elX27lSK^11Dba67P`BohLX1rNMt{9O+KM?kp0ykvX3 z7mw>UdrVI6hoFPC#yMuq#@9j23rpg}BZ$xML|DmkuDyn>4-{rIR{CyR8L&4VS}vbG zEDxubj+IxZn;S{-He_(O+oZE?Z4XqFc&4CM{j|2KFfu4W&)tUEW zt&A-nwW^PbOQ$+cr$W|si&Cp!xyhw%eE=Oju*&I%I^eCDGlhWUJ>D6)HFKf^r zS9d4d9pK7oicx+o56W$+M*T0EY7T5UsJRgB@@SNRMnT%;=#^Zl+=k0aFyK<+x%5gsArF(T-b#8)sC(~XL%d+CMXID=h^OnBD z{>CdWz7_oeaHF{f$F$Dzb?tH#hC!RS8tZ50kvFAv*CbUo-Sy3% 
zSDJlLG!t%gd7t9;Bj&aXS=y|5Ed@pjqp)XD8{D-jd_mgJvUiK`*4ju>yBVMg`+D#lJGdf~&u;}vaqY{NL+cy%?IdTRkK?lxLIjGljJ;KR{@Sfk3UORsZUc8sj^A0BZ}s^w zDe`X9B5~6qhfN_rZb}Cp#8TkeM}AZ}-HdiX+}}7>Om`dGH^jccC(q4y$4I@5QP!|5 zbFtc_;`o1bJFT65twn3`ib7jO%|OdL0N$*2YBV(L!M^@RLLj~i7u_n3da`kjJM?ny^cQN?U_;*N<=y`lKD zi#B!QCpkmI`Go4Z)7%v7=u7 zqx4dbZlcspotwMRy*=10TB$RSJ1a)eaN}6HxycIqcP!OoVHRE@UPU*4*=T<~XT@EG zTG)TxX{|gSXXuv}?Z;0j9<+q5=GeU^A!}ZTnL!~VO$BO{WFJkcBSh<1pE*Z7>gs*6 z5}^^=Y!XDD$9Uh=Y=_Hjfo^Syt)8wy(?rol|NY|Tikv%Nd&y3n1T{Q%3( zh~lAm+v|HZg*hYaFFxRq$kHzVat_MM=fhFulne8C*yo$(jju9votENjZjSNsx<&o< zd2EE4`iN@Fb#BAtS4WdwpVUFIbnz>ak$Id zV(r>zjlOlZzd5|iynNle5bs96EH8gIVI!kxr{CS+7AA}Y!t*5?Cy=JKHg}leBJ1Lf zEBBpDK%!mKHu)++*2CoaFy~X;d$TIl#$>L<@2o zz0a59JEpU9n0u`d*Z0%uF~jG;rWRPuesA7`XtfL9$CWlVZbcT1$5ken2LXSG&&EHy z*YmVj?11*-=jxQOrmRW4?#PTAYchY9%M^2dqO!NQ+KNCdoR_zo?|Ia8$+3HwuxXL| zbhOlsn@toSTc)}NR{6Y~j6>$F%lR%{y7xo9-937{x~{fwZrz1Ck}Dldxy(geeJecU zdC8CLxZ2iQL+{efoi&ha92tLKbK3G0%E;k-O+^Si?=U{-)o7o0Yi*DB(Nlttn{7;| zyDJFBt-hU3ez$uBxn{}|qpQH)8~V@`Rk}Z?dhoQ`S{lyn^@_C8c(B@W%NtygO&cA9 zNjp?}lqaDxyzkdFHR(Qg=|ew8;jWHq71y#KZ`cj4^3es5lCjP9!M=Z3t2awpJ$k2L zUj?fR;{CN{L1G{K_a!^Mw0pv;F58;)aUUPvH(og8QUTh%9r1;}yBKH2O^&B)uCryA zn2v3k+;!5-s68C>xy{Xj;pz>w!{uN-x89Isa;!LLKX147)|kes^|&*Y`6km_&okX$ zxC5=OyS*Z5%yGlQ8TEhnJFD-mWz>w~P2F(abJ!ZA7^2%dDNaKukcB4(bcyY9wlve%9`(xc_I*{Q8BzEA0G+YC-L zp`dOdz0EJd(=4XZ=iu7+8TK9DI<Jkt2 z{YE=5*`L`^d->%K)TcMz=C9q$InGh3;hRant(2-_-%Z#D?9FJrr1 z`A5OJc=oni9S(odI8$kHD(LvtpRtP~K@)bH6${RorUYwK=GhQ={aiBb{O(?jrOfmI zpTj^Fw-QoA6<-B7wUQ-HZ6Ch)m3MwSt`4&mt$?lV@3gjub!QA*i%c_u*OQ>2_#Pnn zfn#oMpF@AUdF{{d{hoz&n|7QPtCd6F&U97W-u_Oj?nZyG7Lbo6>Gqa+sg54~$SFVP7=vD0?XI5MmX=PsVG za9LedKzGlBns4rYgUYkdI;QLH_N#-hn7ZRMj6ddr<(FO1-f256i(rDaF2I8nu77b=+sSTJjQ|YtP(Dd91{luiETr zX}j&CY|5=KF$R6_U&4Aou6t_Ubudv^F*6+t1)cnoZjU^*$z}hsMOnbT`i-|gc|s5= zsnXa(r08s_OkDF_b!0YaZ}1l_YUi__j?ToioT@p zp{d8%)?6+PR{QLc8ji=jUV@(u^(@`>WbfQCeA3oEax!CNN;R*$P4RfU*L?@f_tqNg zykNFFZlpcf8BgQ3QV_p6ITd%m4CABmH%*qDcAURwgVPvpMiCSSQAqWtwtDSotcZUd zi*CNap(HDxp4T5Zy9Dt=!cw_^W7QO~)^}I^?E$52r|kK34!dFA2~Fi}xoNF-xQRp9 zJNDUTUW{Va6?!~-=&I1{*wtA8${1Zw+;~4`t6d%8?-NPh~z} z$=_z)P8F~FM|!?L?y|z?M%na8iB1V8=*hjh4*KTZt8PvE;_h5USLDF9kE-3Hq>rpu zJGWA$ArOVdGZf`#Y&S8r{{Y=U9~wm&dLsH-jSd8Gwq||o}LB(X9<5w5B4>N z;dnW8y|$x-bX~7@BBzw+Ew7#>n=rFtchAPcg-YW^|mg6+Hm{MqUW->D*m{3P-GOBcvS1X*FwF&$3=54 zAF(;}8zVca2vgQJ@Lx#d06Txt(QR-C|0L!9zH7$4@|IjylXw+#nN;7ybM!zDWQK;6 zYgTMV>cf`jwC!3=j@I=Zcu{iOJ*tBy#iw)a924;k6KYQPp?lioo&j+$3p#fQr@dre zQ^oL(Q#&X1=%|n8meG#CU9G9;hE`$Du}axeH8pM%KC|xB+GoZ`w4Z;{+QO~zh!7C> zc0KBc%{ncUDnSM#dQzFCZ*{m`%0r|oyza7r}VtHsD zkL|1jedE3gYY+3GOg3E1sWOZZ-M7weV=3(8l~+%3E!_6jv#-{k5M_uu9>Jy@6{8c= z_k>4or94%)N$%-oe4O7#)!#3Q><+db_JS(sjf_uf5c03EG1h-Eo%hL^dZ)otP=%)O z*o^=U@zk>eV7Xdfb=%F=q=x(X>W9Iad!E!&Vy<4HVS?G$S^l(tbvt^VyLPDV`eXf| z62U`pRobk)5A4e)@7_Ug%QnWMGmrZ!#`;@0sp{u!Dw zPoo2Oy-;|L*e8EeStr39ROa$#j=}4lTLvTR_T4Jz`qeYXysRECMm;)#m7o0kfp5i3 zsN8b>7Vmt0O}#^D;3qe+99wSM{r1k>l8ftIyZ~L)XV>Y^tltg##=Iv7aHv;~dneWpU`7B}^Hgl8h-iWqoeoq=BJacv2NQv!iTJwV09n{x%R&9dj zq{ee6#o~Xn8UsWu_{aWKfP=$Cy5BDZ$u2#5|;JGGw)!v}du+%Dp?&x8PlC$ePM~uYho86#AtM zyxo5)NQ(1gxb%b5Sfy{Cm=s;Y<`%ajH}0ghWG5o(F1`_`*7=GJch*!@?9ZaH%PdE& zq*i;|!G7R-$9efVdY*V`sz;~Xxcdk;S8?;cB$0fQF0mD1d2lbxN?s?XvX!*`Hi~r| zWv%w+j9lfTWRljEyS;cV1;c*ZoNIf7o`8QpORBZ+YK$&Hc=2dyoD=o^IEVLKPp|q_ z+VFHV59T40gjc3FZ{~cZOGmMtHD8Sz`wax{y;)VJ^-kJ3=(5qx=myZ2xnI|8J>ApO z#v1mF8~4}s{HzY0Jl%4GQxK^z-nbL{q~uN!qbHYq$fdG>x9et3l6oVin+MpbmmPn7 zXU^UH!24*6^J6Us^$FCNmC;=sDq0D1+#IwbVD$iWrZ(rqCfTgsBlEmllY7j{i+paB z?fGT6)V<$jp{T@kvO>3lLaXknow0iokHYg_J*`7$oO|gM7rVI4%VM`z7%$10NS?Uk 
zNvO0lc2Z}Qpfkqu=;wtk1#ek%1+ahH9A)jWmUL8{t}Y*WPZ{3J9+1w9Z5nTGb zR`jH;OrtO63FBaC?ql5c!5P1NOX=G~zir1NyZL|P0xc*vClcZ&>yxhQ&cijz>xg*> z@*QvdvrTO!U&)O5z+K|T^gDmI8xP*)mIkWxS_x){_Qx=(H7oXjCV4wgo@Z}VWHsDG zFX3uRp0Y2gwp>l!MQ@7hXoutNdcHNfF6ISD90$Jp+wtb?N~61B+%(yvd9!OvG;#`@ zZKFe(kBSbjDSBtR>Xn&x{*mHLw69J<-e#NgMLXtr+ia48mV<5;X*++zs$^!LWP28IOQF?GrFr80D#zLA6dXv`-1nQj(O1@%>hqRVC-f0dm zca!q3QeHQsPWghWR0XvvCFh1sC@VYcNT94v$8&i-Tha79vRn6Y&sx3p#i}cFWmSwy zQMUKIlYQ3~0u{UbZd33^da_LkXO{<`XY!`8?;VhK&-32)g6Dsf6k~r0R?>R(JE5SQ z$~=m;UEHYCiOHPyy?gZjeH%JiASThf9q7UqLw*oF=hf}Z%D_Ey0!q4*n`$q;0&UU) zF&Q#_HC-;3VcV=>;s@z;bnJbwYRm`a%`EEJyB{>^iQhg(e+*)59|<>tS>sU##@ip(Tun6)7zrnKkJL=Z-yw^H#LeSAFe7@s#d1@GK~SwnlPYK!d`wL0Jj zA6&~@5x+pQ3VqP8u80!u&ProI2jAx95aH{jc*$8v{i|sNF|J>C9jfbXU{PIUgi$P< zfSt$&YbSrrF&NIy={eD_y*jP@Ihfd?Do&3rpRilSX&@Wj+N!~BR@BGZk?$&bCh7LV z$A>gLt##9JO~|O%HOF=PM;8YA`*}FplHJNqm7EHjd!ucyu<_Wx!L{zvS60W*;prTo zJabQ(`r)yoqwP~);hqZBozzSnxnsfac{l1`+t+{X@(x7w6u0ScJ3H<{zQ2RBJ6DRw ztl~2&=~_r7MRRGb&yp~nY|fG&e7(|yeahK_Oh2v)ZE0?G3KgiiJZ7u(e&|xQb;|s4 zc}>~DHec!X03B~`1#Xk124h@z&7O+&aMg{dpdN=v$~?#(MoT@vkM7uW!#rf0>+O|R z!cc$d&tbEV06vGAC790fus`$1%LK6l>WB3%`?V@ax&T6c zm+||R+i_Ue&ay}4H!<8@4?neynzaufGLOLG-Ipm2;5rYSVS; zqnu0Bu=73BkGD~vQ|%o;qOR+l{49vk*^{s0ifIpL(>TaS(|610g-rQ1>Fidx-7npH zW?B7oxOtI%moI7H-laY{+o#0$AFL#uby;@O<38HSCCritZus2?zMl3 zH#;Mfr7;o*%J@OvlsF(4*blW+V56J{Lx8^js3Q&~H(6n&%<z(e-3|h%QBpiEJaTReodbJzTbyW4mZnz4EJ(aO_KmRRLwFIrGe3V<3e+8l zaf##Tm%@^%=2=(G;JqUlfnC3N8qw$eQ=(Juj z?3tw@(D!+1VcC7v?DF^s#@9on8W_!(K@fS3{DInLh@L?7`won}y@^4t^$d^x^i#B; zlfvWiar0nbG?*NOVvft#m_t=Y-No2fDF8n*?Ofvg)6*0xh&l@R<-qrv`|eCg2g=eentkXB@xfK?@bf%C!i`0!%17u zS&$8LP)ZiPUU(Z}lq`|>0VvR@;9%kM3dbIXB2(@}eC@sME%9Bt+FBY(ffkq2dXtC^>&$-ws+8syF4rQGfq( zx)G-jbHwR?tS<)$U)#V1San7MrI*wx3w|daK32-lJ?w%EE|?0OKRrwJ zN1050U-+PS;IehwTtv;XZULG5^)pQAv7AU^Pc@PaE^)jiNT6wx$5?6cs#iW;-%bnY zLY#je`PJJ+0I+2lG+lQz+2}Fv8ID9fw{YP#Z}d>@^}c>pNfRdtr5Se~OI->9Z%2)S z*4Oq{E1Qt89E)mXZ-l}846CyEcgmQnvhvsAF~-Nx8{CXuztF6;v3n#@+1qBnQU#L2 zD9mouFX>J2d}J|whqb_{5vLz&Ka9^SqIiE;kl9NI-jcWtCwCpo_saR^#uhkleNJ`D z6KPb_W1k)t|5T03&dvc%em7Q3^lfIljxXCuF)LTN(D1R1r&~%4ChaBgK9rXxIDNgv zJ43Q%Z_kF*RyRUe*j=vu6SlzQ^!IHDeeHNr=~@-;oK^3~q|jL*=$`#Aj;Z^r2mybO z7?_XuVPSh9*-Q@zACR|yjdNyf+PEf9&0FTfvjkb!x7g$LTYrYPAwW1@+`5d>b58hf z0$MWq)chG9<5OuAj#+PpSjkk$+hJkLOOYdb0kB{VywWVKaO@XP#A!Y2MA&JU;8FPA zb0@{*?7)KwP7uTb$?u-(y19En4K{y53~{XZ9j!?EzHfFoS!S_a>!bE#&~P=dw(tO) zzR}IO3K1`LZDxC0Mg)Yjb}?c}38gKMYZ)P=S*Zt!1mw|voBdZ;{C&p&K%Pc8co(H_ zoSo?&q7E0h$y6?@K1=p7m(<% zPjc`j|LmZr33vC)OO!#U?^n@P$zKzGN^rCPzIqASsGZ9Cey}RN120$C$hvI>zv;QJ zs)CUE5+D_;Pvl)7N|i8y)ORZj4^uD#W6R3>+lO^QxJ!a+!t66go`(>VG0l})Xxfi) zc9*$57#~G2gAR|2B6*XZ%^-i@*XOv1r<`#sVOW<8fmMAmHl#NmJ02!2jPQ_I0Rar~ zM>^N`z>3xXEW(1zXe5(ji3_0fG&g}$`b2Xq@oE>GE9rW9^}bev#tXiXr3}lOG?L#>sSzE@@X?aMWuErMLMVFJi+UQ7dOV$|(2!a={c?XfMw#uC&f4xU zu1~U$ggk^qyXMo;1~KqAwfSxh5#2{M@m6C6odyPpK5|L0%prE*&$)Vd=P5i1H;R7*BtxNt&H>YE06a{e zM)b^VcQKKn@HDpRxB;TAhAm4lb#@`HT(cyZ^;(MBYqiz_vzm5&&->&_vx*qeG(ZL$ zKv@mlVtaBXU32<;vhdZH%X1?Isk}_cb**#s`mLi`B*kk*8g(zEQ;#&=^e;4?@R~1B zSmKo-J|2Bp-AsS4?S!M=Os6X(&Tp3;QBr;$CF{dXIxtxi1(zB{)Khf2j_I1tD`z- zf_Nd&<{9kCme{;tHqj`zJ%_yBzA_tUzt}3i55N&^bYUp+X$b)Rv#50z-XFP(~ zRhNo#7f?I;=-wa35WGt`nh-+N-kHjx%U6RggTBgA;#E&sXBQ?BEU@;-FYjuTgmv_G zh9w1pUxZvJip_yREZMU3Cgppa?M7hl@;f)Z8Ec2YMSbSAe;mKkhy^VNg)XOYPVOM45`ll-tRA>GVj=FVUfcf4D94*;T)|^AY?1!fE${HglWrG~uns?E1i4iFR{@jy zZT%XYFj7;SEUDv%eE?dXym;vN4WyR^VZLJA%ZG7Op{0pefEzY?^*qg0+#=<|eduRJ zAAU|-7l^Qey@RaoH-)$)U!X-3O$g3%JLBEEHabwjvL7Trt~Us;Xn_7{P|V@v!ttpX~cU*JA|Ug6OH)81;eJ(z2GLseOMm zaU1vEf{5DhsDP@4-h$i%EV=doKof9h`xZsH3zn|Ui<@4&2;2nkaaJJ^)H3wBQT6SE 
zuL$-zox?1NnjR)~4d?X*Ik)6x*_}WSny-JHQ)eSdqG)KXgSBm&fVftOxvZ&E_bWH! zi^2FiWA>X=qz>1?ZNw4ys-XiUTfBc&t##9%Ey=)lSVamnq&b8_)T|YujYj3z*ZmPD zZYwT7DZTQ0fL*8UEh~^SQDY>AEPQ3PM7UbC^8!hJ(T1zrU;nqleNCYb$Ap+!=c+~- z+c;Ia0ln5C`;oU0=v9*^OgA&Tl5ba<5giH&v%>3iTgDjG(WX63J>{aDN5y~UrGBW; zk1(EvvNv?TngcU|I_jlQlVe2Aox+)E>KS6Px|XvR8e>iQBcIQx^`(lPU#uAg z@}vFf{CTt>)i(6_-H7nvVrfBti`K-}cl5kmYw9i?`QTl_=4{=40zpVpS1WoEyb9P5 zMBnP$xy0?hAWQ7GtQ3JufFFPT#5b?9$cqURe2g+>v7$v=nexYg^kkfiWYE~qSw?Q?yLy!I(NV_2U$U(e6)WYSY^gRR-Np? zX*xVXlny*mfNz!8l`T^HPSm%03Y#OK?uTf1y`o1>hN>YRiJV$>HM5B;0@Y*D8hR6X|hC2lD$J z!brWs4T_aSWmuz?{aV>|Fo|Y8faU}LOEaqjATfHKN2m{LzDBOH$2Nq+{H^g+ec*5{ ze!6nlsTiZ^s!+ESP~Gkepu`j}chV*r8?A6^G=QnBz1O_9D%g-U+cs5u&jN>*_o;ax ztkzMp@5Uuti&TH9GpliNgK2s1)}c0$bmh<@CSrUDC;Z$d+gdXb(d}aI0NEx(TI@Tjyw?`gcyzSoLsm%L}r*Mq&yvqvzi+Il?`@O zSQ3~tE(CwFE~^a5d5e@q1}iUIRfih1i=WY;lKw(+8Ukj}^pnEwt*~GjpNK4$t&ta# z{epGT=+3?R3T|OpV7(-1M{de%P@6NplMnz2*O8m-O29q$pR+y9GR}#~R`m{DdLEy| z!zalSOv5Me2;(6#a4eg?_nzab9^`DWU#(BN8=!xtA2HgQI^9PNM_qsmxh3+{Aqk}RTk$~=FbHrmtSa4DTU3OZG8uL%(q$a}Rwy#}hKkDo6`WXCW`O zFJZGdM^M_4NmWkaFirc-KY(vx5At+P88&s2yAE@TwQMp>)`R605VJ*pk<3rUh!fsw zGIYsxJ9PwJpla`^%NWc^>n7m3c7$20ww8aFx~e|UWVpE%HPC+GR`$i!NXn#GGS43L zCiH&NM6N41{A$x6Fz#|WDnB@;=YE@UblWCX6!LAsJS?n!G;naU9_?8=$_6F3M2avp zUYKmuv#|T>1xJIrF4rdQ-hGvIITay_rV}n57icU=Mo)E&P_q3Z7n%o|m`GOst%!d> z8oyT1FXH;9xw{CBil(mA_Q?>TK?wxK1;$ zx2bP~;B^#2q!*3HhAX9lRb$eZ)i6K$nB{UD#c`=y?wGxiHs^6h(_2H~l=y;F+cCF_ z))|-8Vy`jw6%TfM*2j~p!ZS@2Hi~})*Nq9qkeZgx<^wC#Q)Xm-W+2buTwOlBk~i?0 zc40FTf8AHG(9{E#VpI-3^@rUs(eN6yo4}*la`u}GDn#d}C%o2!hT>JO8SypiNSp2kX@BJ_s${MuYsG^KTnRd|1~P+5W3 z7z97k-~dgNB^;%w5)0+*(#&Hu$r;B}2OFUi4VBqfg>!z$ut;TJrlGkD4KrDli^Bm` z!W*d+z7@Qvs_5<^F3^)zdy|N}Y?MZRmI1_Z45xoaATk2~%KHE_CTEtVlh=xYO*FNg zrlCk3Ls-w*2uG1y+*;%Vn_z#>hN0S|EQ?`c^Oo`nwGQPX^l9EqEoBd$fe?*utWKDQ>16#Ugq#j5Bg@*?H zN8B0_U9t&&*SRXbTvrESJ+ssUM%GV&2=Ye~Ni+~;WAru3F>NHW48MndTIG%Ix)_w=ry@7%QjFeeVN;hO z+hER$J|vtRY)MrnRB(S+#}NXy((~LB4G%0KIeDE~!4wPs0e;{+-XivVzxl;iu+pf< z*m9|%aze%y8mhPi%HNX3Pk==M1j4e|StwjEpZBL5M-dfSf*3se{s&@5ZG0@n3`&=@ zs%I&R(FJOMSWYmCeqiceM_5fNsZ$aKo-TN&1sa7ouMb%7N3egD+ECLdoI-Du^W%%h zvVsGChhOp-P~07;V#j!)HhN>Mc-;`J;bumAxRq5Cq~@fDn|M0m#1C{viWWK?HCU?x z*S%k>JRtcbWnZ;%FIAwTes;w$l$kK=>A(V6@rYflSc%koiRM|t0Xn8@#TxiPFmOnej}%N8`xIH9ay`C-44=Pa=@+`E5BRl3WWW6QJeylgD@TbW+o z71E*MwHMPGUzzrZc*)62^o|PqD=qzho;vbBK=r?G|KF~S``=v~qWDkOCL^5xMdD!X z;HCaV@z4LqjQlHoHg0g4zv!R9&M>_7uk|$jeVQ-lwil70DfACI`PV9B{^2xHqYfg9 zPlZ(e#jbxL`)^PGq!i3ZI+CvcBeL^E@E_#l|8g}>iadDww;4DL!-0RLLbn!ocKzHX zjl{n^;`^^D#s6Ve43hXQcm9{q{#Tp&$D03%PYmaX8Y85c1Veuckn!(F^W49DDeG4J zk3Dk4{1e$9?cb6ADTa~>3OzRcm)Oqmg7V+?3deu2rbBf3B%Z1B2fO?oMEWyZgiVU; z24a6XYX0#$^q-Z;cz=JkH0+(Ac>e_Vzy0n1PR8`F@u-f!Mw2RYFKZ@ZWTOhp9o+V9 zo!Ut*=5UgJRS@n|BaY3_Jpf(cZJ|aGc}jju+0NGU`z8Prq=ZPj^hf904s7jMxxGCe zSs8!UajAnSu_O0A?{L*P1I)NAAO-=asSKOR7p%e?>dkR|v)rC!+>6pxLsp-?QVwf& z9ND^t;>8LHCa0Lm*=3#!T-eTvdrq*08C@tpN|^9a8jxtEmUUAo_3+Z88CSK|#E@5O zR~<8^)?g9^gHfAd!pQ0c{=H$*%+N1l?Mi=W5eBR~0P|u=N^DPvGyA8;n;9lwIr0R5 zJZGq_;kw2&2n7;$_}H+#BsRaV*5)KQ$=csNjoZxpRjs;e3{-7TXH`%@VH*w#iYgS7 zEmNfj4PqLW2f&AuMW!JdhDHQDE$}l|dzjPlwtqEoyQye1J57cgAkL+kJ%9He2`_($ zs1U_?7r?FF6??u!IpzCU#oiIw zh6sqHtdv)|l0N9ublS%CHE-xy_K1Hy>+rxeLgYuWF&qm)lD(zAeb2iwfQJ6<`;WDx zV$n$yRt6p*%keQ6tkjV{j^aufTvrk=9a4zUJHu^&ktJkzxKemtQ+7Kc&Cj6f38yI) z;L3H>2c4C%0NzY#Ew&RyNH+bn)sRf>>wYX6?mp!Jpb*?oZ$lSX4w* z=2YSY$a{bl+9U&I|0|z8^!8EU{K7jR0=F73n;1fBQR|(XOY)(K8KmjC$>a|&R6>*P z*yjY*ZoT)0P>U`}ByV->%Cc=!5ERyFy+$xh-lF555QKASl5j3&mJ5GK$yjBfKF;K< zuRp_Aej_c&H^qo_rBvjXg2g6?KH^_!G=yMkAu8v!qZ?d|@`=_S|OpYX}@+qoH-<%GX-?5!TtU` 
znt8xQYp6xatxe)sm**fzaFHO?x($6I^~L#QsKe36)P+;0P;sztq27IVv~3yhti3Wafs&Qc z&)hY@+Qywx(%zrfa~6C=h3gN{QMCAx9PnA@&X5U77sL>DA^>(uHDm^}x2PNH#;VGG zOYEQKz;CuRZ#;jlt*-YX@l4_*f_Pe9H68H>WB~3sshhd2G06EYn%5T;gotTgNdSTJ)}?;W#c%2faT7Qwn(DtLCNogb)A>-y(o zRP?fvWS>I4k#xopg^Cf!Dm zP~yEw(sY08-oEB^;Po7r4IXBiE|byoYH%b-0A~+gJxpm>*mus$5kK)QZ`TKuB#8=8 zhO`hrN0RwfSeB-+8dt*f{!mr1QORlymcmx(ts!i_4Xte+#Mc+%TZ#yZVhc>uAIl@z zK}rv5DH;>$1%N2Z`#HX3d8+l7@7@ECo-z)y!fJo^lAsngY1HLU7>+ON`h^0U{iXsh zb-VdRb%P(md;J@NclBPuJJ3um)zag#C=sJERxB-tJct;tKl^Y{q-dN$rRDP7nG`;fQnoS*7WeTlbh zqAY*!J%0?>_qC+oZ&^2Vsnm^u-#Q6lvNdzS4?}YXg{p%7{RB^BMT$*XokOVclvLOp z>Pr{mq*f(qwCAuTgZL&Pv*VhpLiaN@85(hz2qj>Fq5uZVvBmmq;#u^sg_449AsHM9 zo1CZk7ks@<+Z2z5Yq$=dile+lnt+k2dP;x4mixOFZ$em@G_TO3COuZQ^!By=cDT6Vf759Z2_*EX=6jL7N`}=G>y8<&Hn*mK45D*t&)6hlvcM*B~GD;~@ zcRyl(Asz^NH#ev>R&v3WqL*YU16F@>zzR@&QcNAQ!IZZ&tw)Js*<`UZfj~ChAYnE_b3P4p{Y0Y^aFq2C*|D(Z)f4We_qI;fF04)LyP27VdGTJbaw zX5Y^*1(A`tP%%o%cn=2TBO=0^5IKJ6phq#~4Uw<~DJ~S5~JVAU#I2#n-U9|U^ym1?7WU#ao z2iqADtNOUKxRO5(S9%Gc4Lg5J0TIYAbVMS(j$NX^uCenOr4v>ER&XUN!k;Kg%D`a0 zRq@x3IeQ$$j16aASs5BS|2pPYo+~~PqMm7D>vUstM=y-;BYO*nk%Xc9<+^an%g%Qn zi1nFI>L;Z2m;YkkrFLS@tv#%gR~5D;VpPX%7HOYa9WTJuOn?XFOuc^tWri%l=p_D3 zxeFOd<^`14*BbZXynKNxeKf)pytlhTd(RIo6K~`~Q*zaa-j|7YE*6ruzxDR%#D1H} z*bS`!t%X#4)-V^~K3Od_xFu0eg_RCD8q8!ED%TqI8edXh14nhhc)Y!;qPoW!-)d`S2X)DnV{wmz@(HMEL>=Hhj+@o=hXNadkYg}5;nNT|T7 zMqL^*Wn2f7{*LZ9=WO)_>~>!98u zXk}YQ#y->MlUaWo{u`KhA2@%n2-hS7Eh1E3ZdM_kif&=2&&U149gE5=xd6m>S_h-M zz?j4GnSS{Jv(sXgw{<#3p>s&Tji$+t-pdL_ofM@w;Api+UML+Hvdek3L?FUHs5?cS zLq=Om_~5QS**TXJ;Jrc)K85e|7p{x^DdEA#jk0 ziGP==w1$6qVqKR01_Zp-Wg zdQXHR_sw^Xw-uG<+}4-+H;Q79OhD$x!Rv_VXxMHgm#Sqt$jXIf1)na0 zR{<^#B-@lJNOgU5g@VHa?Jd!1B$Zhn?jB||_yU*ZOQTeVnuqpeYv(j{xX~R-)2qZD z4}cE$*@!0gP(y}_2@=!MYOkD2tJa1?l)x#Sm8PSNp{vJqUuh^@gH@yWn~6Xqxu5HQD*w zETT>KJq8_#-6V%fZoE*1LXyKZ#r^I>>PA~xo-6=KyY`DESboQ&(0aS;prhs701P=l6U=k>W5dkZ;7n(ACT2ab|g`0 zwH76udqAOZd=4gt_f|lb=6)d=vdz_h@!>Y%YZiVo<)UxmgCLVCGa&vyohIfJg zD|}ZZx*z2DpAunh56w>*J3xBfxmNQS>6#3Ev24p{c`kJ%!lw)Q#VDF1A3gCjFs_*5 z+?y|jgL-d+@@j&ajA_5=b-iqPp-fWsqFO+bQhzi~4ru$}LoF0^fhYKaQ=A5Wo7mlo zD5ZiA8Bl5@X*3~H&N|j6`Xl87?>94k8VRyyh(Vfpp4YQsg*!pg~<4xX0m z?~$NAz4Es7PxixRJ0J6tegMkd0~v;0PPFVk)fwZL6NBnEF0r{P2jCVJRdT6sG;|pM7BYb5t zVb!VDb!qddmj^)T_>4P5vwLbN1Ay%`FeD*nFAgv#nca+)p)1nWnK;K`ej3G5ovJ@{Ow7`k}e+rUEG^m7M`}F4=UrW0!?bzzisa4{`^3(qIOL za#I2^R9$a0@?|l<#I?MCg=4RLM$6sOuynX3t>Hq@tQ_&T%8V-tov?exYDs6_5Pdqt zIAtHNA5|#9KKk^hMS3Mmsnf=7?mpkC21c0CHS%pstkml|KOH7nh#cYVp2N{~l2d<&Yu7P>-Z4eE@_MK9ML zJ7emxzJBEfLpAt+JJ%8Lq#CxbJGU6yeWMg%Z#UJn+{AA)_&CP3jJN&`IH?3beZ(^G zHOrp^PqqV^Zpi_L5Wv#lbyBKxZQ!zA>1KL?4F+HiIA#(*pI-&W)700=lE5DJlqs_` zw|=*eV8HNf#a}qF?|k!>Jv|C zWeg>zO1b_o#2t)DwVM1ZV|&z9X@c8=zsGNHE&NtJ(+ zOz-J46-aV_jpoqbg@xKQ8A%8d0n8Y0;k=jyH9UBd&8*UK&zmHb7z+>MmcBtC_1Iuw zxBZcVVx~PxrTf~)7-k1f^L&xu)wCY7NO+zJaTvOATNSUvH77((WIrSCbJ8N|G#LGc;$uMX0i zaq@6tYd6^IGZFRgA%nnDj72+i=}=5E*_{^0wUexaJjp|&&Z}wEAmD>u*D-f=INjkS zcW``QzjHS3ZZf9Og5Qr8*x=WWRtP0!Zs*S=(eV&xdU-3~gs2_Aw+OxUsXVS(IvM9A z{FXO=!A(NXrST{V>NdIk@o+)V?Jq)BUcK0jxt=^F6+JDzvgZ%z8RbvJ4X23j{>Wn{r(LvNNrIu^_F7J=2e@#|*WcqznTT`v(p&=Z{1?9d6w=~9;< zI7tBmyw^CP+3>^61Hj@L_+hvIm*M>70d4vjkeZVFi4m)8ynrBK!?j{4aS{YYAG#`k zEN^E14~N&2mOxzq>`s`~(aCI-n8LP}>f#H4sBnM6&$}`TcY=}hyhmMzj?O9s?G_XP zHHF*3f_wBqshiQ!nhXs#tJF!qh zgSS1PZ}?sN z*L$%0;0ryEkJvF9UZps_8t)!dKE|*SA(v#3Uc_{(3Q1Lm$`}?pNeDNQbJ@7He#%MF zUExmra2V5!oM))arkDlte!>rb9KApsR>V`2?oN)C+xVNlfxx}y-GTv%a#|Yqer0pD zMdc@GnPJly`6T|S`GGYQ{dQQc9wH^i`51xBxgH0y9_KI}0BNtr*S>0n(BgKsEpZyx zNN&HrTv|E`d^!q~04XDO8UaLUj+9@qm?>QR^&>|Nw-*RDO$&5^GQTi?3|#Y8RI?k? 
z*fyv}Q+%30ehDP216HRMiQK)mT)`33KbnGj4OAhm7w_PG)b%-Iv%`DgwFP@%ghgM6 z!VAU@v}BD2G@vlf*$8~Q%SQC}RU&N_PX_iTICJ%ej_jK(ZI_D&-BFEZUVO%XE;@q5mf{*{ju5!13&~8bDKj_gSHNe#qx%iN#rP1$tDZc9 zQTM1kwSgC<(-6JreMvs{_5o_s+!~%^TbL}xM!ISZF`~6$nf0t&BI(nq+9h-F{JSpc z13*KFz6wfVCcA#aFhXfH{leDHvR^Sj4Z-xKE*MDp|s4dZg=m)8Rm4N2Ld+&RA?*s_*^>0YI z_dd5?-BGPnf)Eg9#*C1dt5G}e6klZiVyOs>g0EHs-)*$XLf$SZdbW6O<)Y}N?1A3F zHSws*k5=(txW&emgCv$H=KljYf)jXdqaJPe!{X{9fw{% zNF+(`T0!)ztY0D`+R>oB9_1s}%Oc{A>ULjZ5!=hAP1G^8k&Ql1h&`UShZf2(kUdEl z4dDWhZUvFr?u3=rxLA9uV5yg=a~3$VC|pe<95AguI5jxy`q8CzSY1XlEw@bQs-W+( z;hNSZv7c~%<%Edfsfu?DsHJchxmqa{lb{y;Ro#>dUtWY*4x!$K(IJ*OlJ93m4z+;d zxUJvEYPf6~GDOS`)8|6NsZe_YRg8&$5$KC-UqZ3e-=9V}xu+a^AqJI4HEK6Jxg{kI zku$%uS{`~>3^AUZz>kyo98FV^jpte50Xd?eUGGqTE%BzNz~*@rGs5HMY}NA?OC5IM z<_@gc+i!KcUf5xexE@w-fgqU=LSdWj3r6m$=BODb$#}lFK6`C%T(L}ji}Gxr*V!T+ zaE7p45g$I4liM!n=xee9!pmJScI0=pSuu?aPaur_GmCg>hUM5=0m2s?vYi>4!_{Po zQ=n#lV4<9>y+S~Im{g!1jp)Y2lWr3TMBMs}f^j~kr{>Huay?pKeunjybR^QJqWd7E z1XQc{Km&#Lcv(BMhnqThiZP{pwHOgcGSe1aPH8Wsj5hRH$i zS%b@vL?+x1N!uS+3k2hpI~fIBP9r{k*gA?kjvz?;2bAde#SJNOjG`*1Z;+X(fJ#|^ z+s?yr_Wb%?iW;V7;`KCd+}DQi9xnZDJaUCm^3jcOl#Lms(;E3??A8kTNDxP7bO}Rr zAE)Wxs?)#41rf*{Yn5!SC{vfHW29O^HgWC4J)*cVHtJ(qq{E8IV=K&%xr2QkU;C1h zSZ$e~lzDE)#&zM2(Gi5JpHW#H|5DC>kC7HVQA=-pM_Cv2(o# zf_V0GlESP~Dhn#$YqLC^VS8_Ro8-G`JSM#MtauE<3^r90ap0o#FYtNIqmFuiw4g0o zW%%m!l|3)Zl2OxLmhTtxoTf@Vr59A~Wi&@A7t9*6j&`V?S6XIh7&d}zs4NP?+xX1RaLhezxf`v%DWyP1^53R~ro1Rs|148A>UHRWLRV%VoVx?r>Hr@aUc7b7S3n zG1r*=4U_XF*uID&0=-gGy3M(N@LfQ~7|3Mgm1b~;bG;4~?3Cc*pCxk~Ul`a*KDy## z5~}a+EJsqKucUY5hr=WLivQw0k__IJs|r29{sQCf7GT33KR)Pb1qGsiaM{<-E0dcY z_On76+AAFv^_NK zSo|1A`oPH73)3OLrIz$+N~H#4ZYHCkZiAA%y+}0|f4#Y9<38S{YCxEdZ8NSR9{Z42 z(XMaQGhRtUAw7{NNl6}}9%*IB;GUG-~nf$6sviN1P zIuX%nx5MkKkPAY+LqpMjD&x?R^IEjd&)OGiTYRT&E=m$nAP2erU78mF0yW7SJlT-1F0l_pzc)uVf_P0+uTu^Pk&I1Dk})~w_roA0{}ZwwCjo*XTZ=o} z4|5%*(@rv-6+c+5a8t6RqYPQ2_7RQ(jiEFXhE5uL;retcLBirG1Ao;QD0f9md!l;( ze!Pqan}(Y`sHql9g1I#uD5XNDY7Rxm&-ICYWqjuD-4d*SVk}vn*U$Z2+)STLyjKxn zAe=mcr%1L6Q++Z;p|}QZ`WjyJLcw^Hyk)^E>Dv?q4(k?!qR$Xx6BKC&x8_I-nxx^kMC8%*=P zz=!E<8iqoD$H&^V5oKRc3XJZ9-fNsAaJYcmAmC5-?j;(;TmZl&Sak__XN=W?26C+)XIIO!*La4%7$+m3ZQROi?wtZ*B?m$I-5r6+ES}x{P9l zskbzWmxoobkO$rK#_!L1Hs9xIa!mAABNi`hXj`fbj^i;<$J;d!h2Zzp=JOEJqt+%A z$*>MQ_^#}|5Sd8=<&8XazGc}XY7$&Dm~(mZjJHr?Q{QZV)cD4x5oS)k{w{>0W>jRn zon2jju5B*eCWSMK0F=G5#CAR+F&(F=aMun+?llx8%WK3m{iJt}yfbPDhR73r+HYyWe7ixds`R+Ma#gA4NEPV&5k&Sbp-gh1 z+;{y7G1O+lF7mAH+u&Zxd*|jjHTZG>ZXJDpbUQ?}Yc0^cZd2EJE8-2=I>m;SNI-Ix zh_$ZxS~FcmRyJ?ZC$xzpMy1bR6d2l3VDa_&;KHUwl#ffj*KW?oZufb{8MDk*5_r7^ zqRBub+AZ!>8Q-bbfj>Y_*T)l4-ENK#<`M5cvf&?bq^!ypnmu_gVM%C`ewyQ@w)uH~ zS>CmYohq)eV#F(C0h?$zV6viXNH3Bsl=B0hHHHkq6;I>SKd$W_WgiAFj(Ux-lWzrM ztglt>s)6i*+gfQLMA^Dzk)ek;+GK=rdzNDYz^&XeSvgtYi>qI+sgFo3HoFCWvr>;2 ziVrsu3msVY8uF4d-pA+N*3l-3A@Tu#$BV(h&WUYpj)Pg;5B?KXi#@|CA)?`~AT>eO z4oAN2_b04o^Z9DvX}Yd~PnZSEF{$kja6h;0+3P~|nKk0gu&e05sMm8AJlh^onp$K@ zi#es;vQUCuKUeo7zVXMgkZ0=yD2(5{A*5)admoI=f?=EGw6~F%O3EdPP8m0U#C~3P zP0Gjg5#S{ZB_SL)2h5o)?!Ks3lZ_EUL~^-hYe@8On?gn_ekj6FVg!DKOmrA9l6lf~ zTLM--7q@}KmrdA40Ytm@ES)bW(oR|U74mpZQOADuz1X1}VmEr<9r&Xqh)XaJwj)GY zJCO5i3yrXy@%z(nUwNY2;Ouu<3PKvw^{2p&mhqFfW8urbEXney`bGDDS@j}guFVP} zWUgqeNV~(eLJq@*s}6-EK7rF5!l4k@BlkeOcx=8IPc zjLdyR(WWVN&ot?@xGt_mwcv1#`#ztxal^R3@9fIHv8JAF&h?!5sd~(;px!Phk6#gwwrdrN8RdZGO&2Tsi%+AA8cX{_>-{4iEWV>%?x``COz0iEC;SE#3zGIP zKe%*6wsk_=s1c)o<~V`a^(FegI=g(}kUX|G*Db#gfnJ)OH=#LUfZ#G(PK}T0P2}!U zG>o_)YHlN(uXLm5dRtW#sg!?2z8X7ACArqQjH6XGsyi;Vm+bF?GO`3ztcE|d;X@uW z08VPCAXftU1LXyq&c!_J7fs-3Olr;;DhP*)Q4^VgE(b+_mh^`M-nXJZie*8yM@zm7 zAs{e@+E;gR+rv$wwsDPlz(#pwI#y$a_Ie0o&ta)U)hZulLp4)kvsMWJ)shYk^u8*2 
zd*;d+#6XFYh}ihD?u5ZRG)N;2ck!#vR*XgImGi;p$-cBgqxVxZ=?5E3>kaijCF3}U z+0`^E$lo)I#2t7lzZ)7Lolv(Y%>C-?kB#tg8d;+K|Fm3 z@v-et^Vk#&)U717E~+%R>t^YTq)ODpfa{Sho+OL=SbR9=Zq}+=>INaX#W+CkUeChl zp0wvho*Y7_DfdbwD^agexx#`Rn&{q#i2$H7x2Xx0Lp9g%aiy0R_EqB7fa%`e z?459b9Y++IQeh*t}db?{O)95v<#e}%gEda#U?P+7dew}T|gCo+-4 z>qPLLKH#;rYEV0O+roXhC(AJ0kIuOwV$MH*>qnH|@<%;0HtC3n67*F9MI&K0sjbNr z$uKuRl697w`g$C%5iZHj#VQ11iK0Ren{`w$7Dj@&D2+D5lI!S85{Vb#FV!&eb1jSV zzS5cu!>=CZona8!%w@$nQIi?PQsp^|$kMDX30^h-hR$x9Z>ZNNlHA^i)1RyQHYQSk zRB#$zWNSidB)tw&oBddm8#YB%y%S&~VuZ-Y;v#}-g6|)!%mdy=Q}Cc2({F;*I=IcZ zI_f-)$0!+CF#C9jF;VOY&Qor+59DicM?JLBY2J_{&RoSkAGMA^-Cjgpf4e1&Fi%Z^ zWPn@{|8StP`CzB_q#qAae;FHDu8!M(Le0K>pxg1Kcewd=g0HHB*cH48vwMf7BcQo% z2OPqAS0Ec~%E5*cHl8+~kH@nnAC-uPi2m8uCTHHkGU+lUT{~EIx{whj0WDN<=;+Wa zzs+g=xX3bMy@!e421`9%%3E03Yd@c4f>!oa3zwUw`m;3e8q=Y>?hM4%s%eUUf^B6r zMZ${ykuL>BaQGg-qDcRcs}(~l_IMf*{FA&s=RUeE zS!@!7!SkJj_;@0_c7;rw@9t*4m#+CvO=Q%C806_#+MAqDf;6+Uyocs}511P8@3BOL z^|T_2>7V=S0N8YX*89%p)s;Srs~*siH%*XO4zo*Xs@f;B^&d|5hK9R;o?|XyQDCWi zZ2+fp9Cd^;Z}32n`c{~#dw1Pn`h*j9y{FhGy_3U`b~hO-P1QB<7UHp1=1Az#i)>nm z^gs?>t*l?0niJmRS0^?#F&%3)~187=A-mN?n-k5-3OMpcj@_bGanKw4~gZe z`^dMU?;T6^P)Lt{Bm-A}!elV@nD($C7}==#rJVPW>s-yaGI*drIA#y~WdiIKkedGtt`nu0;U{ z&6rbuBjA9Ze%dqyvnG`@_^CD02$3@RR&j?{wrxiU*tQRaYpV9cO5EH zaAx%8{JEbb)>tinXYZM9`BgJ4$@P-+{*2>(Crl;cw!_06J;$2lZD?*FX9)W+b@?Xc zcwtZeaXxE7L<<@9>c(dIHlaj9q(W$ZoQ46>4}MMGkAXD}tbBY*l2TbJAn}!XvHC)= zjBeKKvobFD5`*_4k`WnlS$eoaPkBi4(a`rfh@vTa94(K3uOXCS#U!NoAfC1&oFdIa z?fc~Nx4EK3)!C1gE3KeHRG-Q{s-7?gj~{cc7_lk52vi&l!MC(@(wvj3Uiw;lBV|8Y zBQTZ)nX%MxShzfzo_-Oh-5RwOWZtzQoSmA?M!uhNgyVhU%drU?rbR_6B_=Qkm3$dUX~xp zekG&Ee&#i%D{}6V7*iqhEh3h|gxNi)%Q1JZP97HD>-$D5uMg$<2xB}JYW?U}clM~D zLkawOch2)Roi`;YLUw+NKtdBJ#EJsUqdrxb6KGa{8H^I$)b_>@+>X4SA}2``F}if8 z3QO3djZQnI5wuevt!t~vcb0`N`M53IzECghZf8~UBK8;`smiB7;Jh@J6WVQ;jxrrx zh&{Ik=QHms-k{1-5M}d9;H_N;c%I-7DSU3*>Qz7IeSIku3SZ?0Zp%n3OD&4=cT7aD zD~s`em}JF4LO*ILU=uV&#~* zYP4q75DkUJ))$*7X-0F+nedWdoyESxA=^w*T1Lv7KGp|)j-rjkr0!=-68cxXY@c27 z#e^s`tHw`f^9n3h+(dWS!R2!lSs%{si|G@8kKd*^(jq0sfj@K2NmyC1!8#4hN`3Ah zaj;>9QHW=pjE@^X*QA*->SMkVBGk`}eDY>UobpAUGxHoDM_Cl<8H;bz_PWA-?F(_h zPi8{dLL-arVp7tJebiOn0*U3z(o8~LpgWRppsVG6L`*+7Zoy!Q_QulyD)UaNk|z>> zU$?3zo_VmcdxO=%pAR2#xqE+G#E63N`W3X^A%W`!jxa z3UvB@P(J1lJR*Tx0TMq<+PIB)B)R>PF~foglIqt%&3bFKjH)nZm9V_{ZCq1KKF7VX zTFpHLJjAK1DZra-OWugM%F5j5$A8v;^0LP2A+~C0&5-#{E_+M;C*HmbLUPM4LUjnu zY`}p=k&?k-Scb2G?bVWUd>9}h;(ezDetB%T0!!k#@M1b0qPToNw!FL5;iwi%p0AcH zpXw!bi=E@sK;L=qmghwjTwy2QyJ6lgHdM zTc^s$wlLJ8S=F_(4z4E$db5pYWESye5!jp#h~R|DqD~&>vdmF!#pX`Q?QHYK^Oe_} z&c*AK&z;`H`;&jtQO*xmu-BP?%A-<9oV6v$T=cB_LWs45J?BYSq6Dh9P;CM|0U$3=%9Klg_P!ASNcQ!A z)wf_F$|O_JaLiWy;SB`?M2vj7x086(8J6pRG6JuHk2Z?E{-2|tc z7zT+k==t=l2dzXZlnFdLg%PjqSjx4+gi1K-6_}z1v!{4_tIWaTOP>UJ#nFJid~Nk+ z-r4ANP)_?D$o(Vv*b3}_Y|`6TOzaLL1=5;ep%OYD+OL;wu)}OdFO1|F_n8(9$ zR?lx9zG6DH5-7dn=bv#gU!Ai?89LuNiNvH3X^(B#*v|KyT(A5zCRc0mlT+*bL!>rz zfq-H!j9mdh`P1!A!7YWk;ox73j0Gj*19a?x1DPm;*k{wcO0X{mI1< zU)6@%7li>}ZTFBBG;QVg9+yo@5%JHBr3WlC;IU?VNw5WmsQSwr%B5Gf46oGD(6-qf z`L}<*Ii|$w%KGY%h;8!b)3De!jjhpT_Y0qYR}wH}?OTru1*Ty2BrFsw zL*i18Xb-m3)gk#U>u?JeIoh%cNc=I($yAE~)FsIZ&3m(MbffRd5Z~~F-98#Bj z4g}EmH0GB!JWO2E!MXt<$W)0HJvhdvq#2E-ETdE74P(>|7 z5iRj@Y6wTVh!N=vWE2!CwMXekE8YeAXvBxnoHh%<9)RJ4wVrhI; z7D;AIN7l;lt)4eAM{f#VEY0KEIdb-HkQXMWXcRxVL`cxIC+~8wmIr^@gWA$_(=dIg z_4~?y!Dd>%>)vj6T0OKk;O~^m`hja6lvm%YcQ+Lg5mFpr`}{&+Ibp1Ha-J1VO`B3# zguI;@2WbD^loh-iU{+iRRyMCX3?3%Y5Aw(BLpQmFRw=PrF^uV!7iSbt$KImow{bkXYOD`RD&hY5x8$;z3Tu7vJ? 
zg!{?lx>)EiPIowA4!X?j`*vriA(ghQTU?#^D&^)%k#141dKaf~pT>FV(aUaxE==A_ zXEoXOD%z*%&=w`m(b{%hYSbl5-0?09G~MIrv}*{chm-635Kkd=Pg%U#g+W-+iYPp3 zq{${d?@|ezkF=(*y*qT-b7HX<4l^r!FGkXl^tImY@@wzM^Md;$`*`8y8Obrz>7 zGN36>jvkowFd*$EABS7RG4#}8%j&ZAf_Nuk9&r!iC)|_#&aunB(s-cFj18$Z?wE0m z9cz3vC38czSx0WIJ^GOxO6D9H8dB>?>2m>;iZ>J5VQopWKWE1|4_6}=h_Ty$2%60H z82}E&XL-Gb*(4(sat_pmU2NLiE>^{k%*^>IH(lzJXu0GDS4%PZ}= zL$r@r!q{V!^{+A^fu}#_<=h@mQuTm)XQ;3g>~@4!hcAo}B$1o3s?gmC5pq&f5Gv(Z zQNA^vmv330!7?wuo=lkwc`H?8XF49XWQC&*hvU}r+}B`t8tK_ww(8J-ib_S*BYt9& z&Aghs3%ghPA!HUa?8rC}Lt`9%&t9Wc_jofrrX+hkU(gb(w6&pqr;``F!ca0zu?M?T-&oYr5n6Nl@C&^b zo4F&?PaoEMWwm0rRO0+rGDU~Oc7^tCtMZSjZCUT&df zYtdO)szul9Eh6EE2RK9#?`elog{!LXL+=Z%M%tpSK+Uk!KNIRgva;ki0kw?Yt5*mb z42&y1@Z@V7y!cF0fTP=%B}KkfMbo2qTHOGQJspl^4AUS}9yB^&}c@?4dc^)k+xW1KcQ%q|q7VcrKC9XDTuiHg$1-bTwmKfOu03%cE%8?E$5I zRFVzf54Oc1m>pZ3)?Xy>sE33reHr(|wa?1uCP*S@%jX-`;(B>!gFlBlwBXtOxu7>% zQ>_blvC!v+)CoERvNhdSX3&fPyJaBEC7;9{%#|>IvaQL>z=#$j(85)<>qj|^C_7C! zJ6bc>@Qg@*PtHPFWOCZolXys!L#{Kl^X1F3Rm7`6gKpC9D!s7qBoSA91n8#Z&9~i? zpEtCg2;QC|rk6Ie09JqLAEZ)Fmz%ct8K6XG_mDQ5Zxs%6VZ5t7H|+{;G=DZh^8|{> zD9_4uZ3X)t6~Efh{D`OD8}>H7 zI;8O9lu|V`%}+GE63DAJVkz?$dLu}dwP?nFI(-&O{FCOM2Kz$Zm7)dP?jC%jKBY@2 z$HsvPS|K`z&)dL^b(33no{&*yT!%PJx@ z%uSFZ$R~v9ACS*K>_QGOGbRS*PPRXG+y)x z5AZ%d&D9(p?g-ttYo58f8GrF|(=f_)-$BG7_U{*Zrv(zPD0=jdCv-)9;^;DnnSyEN zqFaKak{zd?1;2Y1me0$9=liaYuz8+;<)p4})QO%5hGAw2ESw~%)pqau#*S|Pe9Tk` z9uJZtd2z&U)Xwz~!*1+=9}r}wVhBMb@{02se&+ipoRFj}D~@OD36At2&1$Z9=M|0g zpe(A+SNjEtwjj;x6_IRRLHpVux>OxaU9p?wWNn<-3>?Xxie{Zo%^S{axBlCIn;!gy zo(-xU)7`w8@Hu#iAUA{+)FQa)Efx-Ais5Zat+p5ACifui_F#_0kInUKR_kSz+j^24 z4cDucSZb9Rv~pbCrdwGv!{F1MLFzpK?b7CsZCb4s>SIGg=_cV>^*TP2POXg&03g_% ze)ESCf&d`@|8zS4UoxHB_Mb|B=id|w7(Z#rgRWde`)Hl7emOYc-JeQiBz#pjnV-rg z0JI-`H|l4bk5&Z+D=aGjV)Hu!p_6!|$25?3C_Dh*qpu33!TzhV`J0o_@A3Dt9#gR& zf>jvx-xYk{&y$JZ*RjCi);j`#H-2*!{ZCc%r=ke}?IAxEOnXX08|Qg{{444SJn}n2 z;j~ns+Q=1tZ%8&}0(IvKUNhVT zfG~dVpPt39Y9{?t$pnDzeix-m->J=4r4(F1@&V8QeE^`H+|4=J%{kiD57wnsk`EQ@ z1yT-xiZ#BUX90Qts-X&h6_7UDGMP$Zv}Sd5HoEyb^5`QI=tdgkD|7ie@_7)oQWGIM zn;Mcmh2r4}bj|JD*E4lhO7s+q7srUKQ~{g3dKDN9$jsFw-k`=~p#h~}uhnlRC{DC% z2@V*@?=-Lnph3jiQ$ZzN%BjJ6D+Ro5iF6f-bfY!$6%FzgVLBUsU3~3%^bra4@x7jG zkeh5MYJ3roOaMnE@w6r}Koe+!E0mfP=}MCr+Hx6Cv2>L&=4oZZ>FH6w#`IB1^x3D8 zg?l2s`fMPlf|W@O$l?Iyep;vU*SJN;Lbp-GYm1Q;7pX@j>UE^dEEs{6dB!2~!inene`(qD#i^i2GI08`+?=9z zbvm%uhN-GRq>rx;Tz`~Gz6QR2WHf9^JZtBM>T4*lGMcTtTsR^JSd}8#s~ghuwg9W) zD%eu(Njn?vVgtHjPW%fnE%buvm!+WJslNmO47dpR7BDSv+D<1um4?gs3g>B8dULmh z>bHbyXB7T&f4us~_H8yu*MIiI^wxvHB@Mc3VR{8Hz19B31VA4@jQHF3^W%T`f$7Zm zb0m?jt&nfEOTVL5zATzKw3K(eUaTrjti3+0EE-gj!5$LB*1+H`js|wbFtufVUqk!E zGRZW`K$Aq;n`8!vEPZ&2kQw0n%;F(2^pVlPf*FmWe{{y;7{19x6|NL;W>XF-2Ao=> zTGQ{Z_(4kE!g)|x%yD?`-|LVvtH`NcUSTZGm%C|qz`^iLtSA7dg{qqvd!o5-5ja2w zK_3087OZOb zF`58Zc&qC~poQJpvEYz0){->F!6jf%gJ4M^e>mWTskKT3qG9{1!gkZ8yIdumT<2IY z>Vd5I`OM~A7Gx}aS0cD}NwW@Fu@6f;CC{x_Ne73jyI_@U_>fPj6~QTb?4$z+mW2j~ zneS6`=IbzAE~e>dHZ78)!PI_v&HLa=;KNLGfCPZ#W`RWiTW5dx|6i;}e=*%gnC?h| ze{`w3L{A!fu9fL>CR0s0?|=myoCxad`_s{iw$_JoQ-f%Av_0RQruzbZEYjq=gXsXETnalDLcLJZQI=!k7LAfTTI4YH? 
zafYV7#|OQP%&Sh8mICIe)9C{52C6ee*X9$#t8J86`<9=b^C`;Y9+N&?fE=y1w8EqycPYgKC>38vmT-|TJs;5`O6tp zzp@=P+Slj5+!QP6)>;qIS!>W)Z_wQok?4#8U;Jr~lLnK((UnOH9Ibu7m(Kzc6@P1$ z7gsg8j8$=J3|a=oXjI z6)v;1p{cCRNumv#+-d#akF6$Jx1|5CV}BeJFcI)9aRjyY{Q_B7Pl^8e zLAHL}Zq%J$to~}Mc6xE z5?Ea=UtXcOwxC;`z?|P2iqovad9;Q1l1+}rg@B-zSm9W0rs8S7A zOKf}|+aEsy?iU{z3w-e*l>Wg5z-Iq`mPl7Xq`jwnL09fOQNCgy5Y!|r+x{U za%C1S9r_v6pX2SPz1F{uT-ufG}9OL^;{zxK%CkLXuN z{;Ad98uYb&yIkESRMAuA&yQC{3*CQad&nh{4r`s#PBQKGw0Ua=Yst0M|0aV0Z&Y{gvf0PBha59QeE8wd0kuj!+bKY;o9BVm93`0;P#A71f?NA;JZWcD9s zc^-Xn!#}+Lo9hC7Br;dw0$W=K&Ef-dz#Lau3d2SST}uJ9X(3ldh+c6l&2&Wc0w$fU zJquKl>7E`3Dv6r_! z>3>}IuO@$U2>$h#bOjQWfBobCc*?(dd`7qS&EJ3GN&1Vozq+)%Fq?Bu$92om1Of9Yk))Gf4}_wm^_)wT!A4; z+Ts}cggwTJSWsDt*d77raL%t**pkfK_;)Ulf8&`rpo39J-IG;QGy2Rxa!^bA;$PTO6=9DGe%D@`BJV>mQP zuPQbK8qZjnA%gJ+ZUo=0_TxFa385gbzWurOjSu5IzVU%+ZG`Ad^q(((y~KaM&TgZU zP_312?Zm}@dvNWqKmT8TH2>6}|J8r*8(WlH2b$w)9Cu9ge{ncm<*78_Xu2YKng+f| z;~f`&um9V_YBaFpM`>}2a!;ew`rkf}45jG9(s;nzqy%cfOEAwG4n;GTb~2U6(8t7$ zGt9A-M+3`a=FDL{y%K~uOItw=JL&_I&0`E^M3ok82!JT8n%v? z4EQ&Y;baiyj755CcQ}m2sTK!29GgwraWEoHZu&WuoRzmByaXCdvI2dajIA|towb}D z`LZRm$~~b1ebB5E@$3S%z(d<$%;e7bTE+4V~KjC}6LJ8p=o#0rcWP=37QkN@$P^dBdd&Y!>k?)SgBjucAv|HB@CT+vE3 zrHYcAsibGsNrG*0%<-LRWbM~quFbKa@+^rqGP>eKrj=GTV=}F(RIchIPq5ny+E`>oWO#L{;jxkfDM8o8aw0-Fe}h4n)~*7rW@UA-Thzsyq_P%KTiE0 z9yap-fp`DR)xNzzuQO5e>#4cH+v?HTpolK-h8_Uq*IPs)LjeAnuH8 zr-Y2LE!!#a^#6zyV5(N_y}#x^=iHncV=V(pM7Gh)m=UT1ruv}Pef<4PtJ;lumYbBi ze@pI^DDS?|556(M7aBzham{)2<)I&5x8#VU?M}Y#4@NMo5d~VDqhvvDjfu!I~8gYSZr7`l#gT%-ZwxC!dRSlidY)z&!2Hx9hv_AenWz))nQ#e zOPcSz9^Qj)q5~z~{95lMne2k^yUF=A{-CDcXh-L2_&sTVuT=ly_vXLplP3#+`Y3@AHgL!SKP} zRd553ibe|7)(PDu8 zwIjt3hHU!~R1T>s_hIxJKlrIrka#1!$|cX$9{quUKFGkUngG-!_tbvyD_BX!AU~0X zOFrTV(9b@U=uh7yb^dam`>x=kL#)5x#H=?gMGT(rN1sK+jVp;M$U>=@!Tjpo6n}ow zsV8c1Ozn)5n8AI?88IhB3Zs-+FAJB-PO8k^f7iZ#4YGy zbfx_Y$CoJLjxHv~@OJJ}DXJ$+$&_D5R;Wo&hlEx*mjF`l6ulv77tybuKStGr3- zUan1%h;Hf6sP1BCKcz3l$R~12x~9yp-ySm1i=@tDuPZn=j+3d$is{8`ZGXg<>{>y! zE4^y6^^7VAF(iKdGSqNSj_$8#w3;Rv-_LLMp@2Nwj>mbWCRFM@C7ntbQ!yAGeY_;g z6hDJ853^4X<3`k20x{a_WCFoAFbADV`I$dL0tKG`m!nKYpZ&+)J|M1BBG$Md+qjRr z;^ecO?UMK)(az7#@sz4ua)0!{Aa;UY4s+M3KFNyLK>MDT@{I8NsnF7+T|6V^Nm)GB z$cUTPgK9CG84gn@eT*|0ah@~l`~1YsOuWOm&=7aBw(vZt?H;st z{3#xl)aA*3$N0yB=)C~lBv5=M49a>6lQgdPQS3A!bK+0hHGazF)l8RlM2kDvLPhcy zk(y?fXoo=OY(Wh^9e-6hgR*5r?K#JNin`p$AWGdz)-N8{dAQh?k6zv0*>elw0hA2l zoLi^o_(ev&Oe*usw< z@`&c^ja=NX2*}ws4vES`rrjm#FhU^rln!_|xk<8dFq^|Dnt!*ssB}hr6`7ltyH7AQ z(1lM^8FI6DSeiU0rtxXl?q=E8fA)np&E*%@4)S0=P%l_}^eIPtxJ00oMtLJs02^QY z(FN(L|L6G9Pn>Fg^mDd+9^+c?LS<%Cz;~D zU+D;pAAxBLjDI(QlC#;k59>uSVOo+%>^!Rdt3>hXlf`lIXmfIp`&Muu9|cpJ}ve!d7g0< zQ$Qb#V63pDH#d3a5$O#!`04YP@x79QnxY}P73hPr?|<7s=prIjS$L%MxRUoc2TYaX z3G$dIDT#5O<%d$DlZ+@Mw#UzDTB^R^f8b|eV!>c3iE=zj9L3NtFIzU?-mt{^`i1kw zExvg7e{sH^VDLq@|D2;@n4_%N$LV(d=N!2=&EMweHr0EN=q5R&g-1&kJ+>yjk%WJOY!1YghbbK6YrQPhf~wA>IlWZ_Q8m z>N%cj$8Ggw@e?c8XP@lk+v8cr^Kim-5X-Q(WPFs1vdafwUW+zxg!h=1F(d02MLnlcFZc$-J{bHMdyanB6~ABSmpBgYojCvKYSkJ=cs28!^wZz#{pAZuoPoKjTq#R zUi$|52WrOD#6um@-oWJw|XN(Ia?}} zvwIs48}RNk4)&*vjXPJ^{q{3ob}y}r6Mu4D+|Le^ds4{-37>YL8-|$N#pgIKk5zm_ zAtt?vrPty4ksj#6qq^8om|Z0bKbJ9bcF=A=`mbAH(&Ep9oRGiC317=IKVyU7qGwq$-~o zxx1Kqdb);Kv~SK>_HnNkH<6JZImLof<%emRk}l2#iG7}yWX{_K2b~EJ>qW<>T(7-! 
z7`rdXLZo&4KCk4QqPy-sD6zyehHgyXhytJ9AZcTzQ z@sMGC)A(>Y@0ao)ZDWhu9R20Zf4PS(NBIwOcvtKZwqK zFtJZFSbG@+#Ke@CpaMN$t)p%J9-3nKjxt_tae5q=@+k$0_H=Jbd@b;i)M(6tl7Qn` zD2zw!y&)cPvR`pu9&Hv<_7}e1Y5hEc<2~80KUmBS&X>Ux5)^;EfPx2gLtw6#MCB1Z zxEOm;LQiI*f{gHh5n|sH((@Oh&4fYmB9Z6P6^iSWD)bXbF~fK}>+(lpWQ;{=wnTAI zYV(!rm}<*A+XywGqiv4uHmFA^^2tn8JGy+|xIFNP4)k*JPVS=`^km^-%uWyAgVM@< z6jbC2RLS0xW-dM@R1DH<_-%MLcv2!6)*I zLd5$rzGT1$ylERF-t=StA7v!Tir+XX7kJ(n@w|1tcwx<>@73;?$De50=`Wbaaag~^ z5fqe~a^t7ENSF+$$pti~qFwQM_vC&GWnz)teBGt)oT2+$rQ4_4dGev)KGmO3uW=RG zXQ#&)ztewsULp;5h$AFhl>_1khadkg?)W)A6(3}uV7#I#`ipqiaeaT}AU{|P{?*!! z*1v0(M7N&kbRkTU3azEr!yB*0G&CZP`;yY}BPGp(dZhGu`tX(XjW?T{$xIb*iNSb1 zBt+Xa3_WWHa$jvz;OZ6@x*?9qRpA)xjX^lzrK`)kDAHVSIAe=!ar zJ7sY%k}6+XJFa{^{{4cJ4miax2VTlb)TMLQ4+EyXinrwz3_cx>xhBTy4aC3EwQ}!eftgTATn=d-QX?4yjN_|Jbe$uNJHgUBA8ShWe0aM zC*~?PZ6wTXtk~_Bj203ee|;{FwsGl;v^d6W%A-wOTwmyr{3I^}%Xz%mJ4gG5U+$e# zOvg9@=sch#Uf}uWsS*!2`XO?PDd~#TGa7yLg6abON6sPVSU`a4q9GN~ID35LN)j;c zH_4oL?CViRIFy=?us71DCLo9SU&xFip4DycQ+MlEG44MBRrACT4bCr`wyCEu{uVRR zsV|o{7ZNQuIr0viV|?WG$4*3_}$5Tz*`?3%ue%64!b6 zXtU?waIMtsoSW;HnimopO8Z%A^i2v$sdSkZhdl?SIaNjqttpry@KP=5L$%}GZR6{f zL*)|oQrs_?dX@^~crqssmTMbORfa50R8UPM?evoAHslp&eEdn5{udG-0`*{*9~crD z0fCn}7!o3X(cbo`x_M@4jQ$qsd-=>%ImF%O&J|bcpFaP$JGMSZinvvdSq=B2QBJwl z&&7{@Yxv2x+8$$u-UaqM{==uB7dGz#W!NCADvK=?L!NA$*lH=w)YZ8OCc)y;Tcw=qYDPQ2#2Jk46o-QaB&hU-|R~Tmxmy z*4I|vtdcqMG*C{L+Y?3?cP=~o^49kJ;r@V$FS(_5&aM&1HZtKVzqP!zL&LdtDC>$D zg7K%Z6K3o{Xkv!CB*ZHRIs{8m|Z zXaws@v_m+7CZ=k_89ZN&MtAqba&o1xv3SD?F@`hi&!I1h_U~bIb+UhLJUO=ZFJwoa z^7h0NirF!~{)1s`b4Qxr;*i^5$ZKHoBJON|zi;=Al`D(nluvMq(I4_b&MR^6RXiiF zuE#4^%FallCsd@lV4xYz3IbUXfYC8Y^9AUMM~T@|;w}gW^j%Z0$`fuJ0V}vFwkBN;1=LzZGO`tTP^x-SiWkQrBRF&hg;tcxui5Sj* zC}~&Bq~go_j2MxlRo*5d&68~GjJWQihr3D;D^Jj`@V{|hmncsV7oVSS(ZL?JW489g zZ|BVJFIgcjgfixgkDdJu*~dQV)H7^<_$|Zt?cBc(xY9Z9?DC7o-zo%6nY z4f@9!azQy!Dbt13cj%hKv(0A-QQryuI}UQhfN%~jxKU@^p%zE}Lp;otbL1_5qpffd zKQSKUoGs3{CHi-F@(2G(*K4#lNC!*EZxfVp5*kh#O;c?Gt{p+ld&yv*k}hA0akKop zUom})W8R&xYb(Ec$wBR-J;@n#4-cNU<4+jok{CBd`4^YmysHb_064@Jhv$e7jV#aX zk9h%B-ux&1T4!g!Zeu ztGy^3s`fp_@kBt@N<6`P13t=QUIhl<}ZR7jrNp*cLS_}6nQ4Oypzht zndkD-P9OaLg+RW4es_{hHpZ|E%(y#L8 z#k`TzaVa5dJYSaq*Q$xA#z#;Qx*gBdc>j^*P8;fph_X14b<7E3upf@p^JbCM#V6*; z!yI$1oZL4&Ot-kFQ0MB9cNlZlcs2;B)76Z|W51IeOui0(p~QS7-*e*z`h3b$cP~5R zS$+b{k)L4qTd2XcxI*U4Ev{hlI{4u^y+X&g!Eg43%~?D7H9ve>jXbBLUxTt3{UXg@ z)Il*~FlSH2MDb_8D_1ag?}pe~bCa?7drAr))c)BQiI>isda7N{>Eu1(6E7kK$}5iz z*fAlUix{hal})Sh1lOjWr|r&%6vDdvx|x5jg0%IYdNa-~UQXy}v?q@CT%cBNg@Zkw z*B5x%^$!pFn7^8p^<~^B{zrU?ZK$GUt@ zNE}9beN_lTlx_(qGZ(3|^gDN9Hb`!H2#erk)QKplkPm08_R__j?up%tJzitem7YO!>^{M2q~K^#nv4W zB7>yiuX4Vo3!WcRsu%^H&Xh^1$&Grf))fV>28zbsvrAdplvGJ6Oyh?YtT$Z$ zS~%K&<0f(A3Uxp=)c#`(_oKojn-Ei!p&mzB^90KBWo}|TmnZwclw0(bkDR;f_^%x9 z@>&d7kB#Azkq)U|FA7x>s{4nXif7BMxf3wxO0L_*@=!%j)qbkBdiw#*OQ4D(&dLXB zycW)X0xUal!UFh^&ba^Q%;3V5~zjB6rx6$2y z8`|+)8=q(=vvz%2G77+e+>AWq-@|4PN|OXU4)o)(m^+^$Hk$qL`pQ-E2d7w?VoOdO zF4PT2Ihs^uA6G~H@LQgzwI^TC`M3igc{ron!ngbRH{QmPtH14BfW?~vEE=wvO zSXVwJO?zEHLpjL_Z_+1jO3$m|Q4j~?bA;0POYJ}rwU<0?3206TbbQ8cV`@?s)b}Jd zO8Qw|gu2~@wKj2GyvB?fP~}W?&Pq`J;zJ=sCkv~eF$AonL^+ftcDCy+RyuIYCREIa7@X`2E)MP=GF(oTb_3s2E28*FR1a4+MxYx{ zkb*#Go|8ULh>>L06-y^Acb;iUZ+>J2gGIIK3komIJg=PjlXliNE>=lJR3qSqI3zVi zl*)p>+&&F*rL=hH5U+LV8X!7<@?v%ZVyYWPxUQLQ@8VCzh5-1hm`PH6cpI`>(m#j`(h2l@$YG}mHoN@Lk@x%t#J?>)HSHQ z_DHdnRk4=ueB~=$dAj^&m){~19TCaz_u3y_M}Aj$$n{BJa{U&U4I>gIUViMY_z5ij zRoZ-ai_f3@@RPsZpM2|+Z+MR1Ie*4Gez=t7N~Gm=V))$R8oS50!Qaj+nFb!8n^3XV zC#t04!7N!9$1z|rXp}=-qYqBJ^(!GVBQ;bq1CMJdsZqC=kRuX5e_z(WM4adm@qMVQ z->phA$j{4Mlw!qCoF2?6JNnf;i(i9nytMg~Rh)a!y>gx9=r@aRTeQ}vZ%LS?#gV7e 
zsF9Wy@AILN{(#xx*|fDtMk6a<3fQP4uCfVLRnQjQpl&gj#+t~zcDqr4PpJbRL6TLHyuS^uIymrNxR5PvtEgq7FiQlHy+>)KI{+l5{}hpOowDws|Wsp3jf27+d+9A~_6 z{dqCf|d8Hny;dvkTD}xj-B9C?K|STuarxqz$GeCv<^vx+ z8c8E7*yQN%4RPe&DTuOhWVDz2BOmsZ*RtY2_UC18wIeU`n+Jcl9Ae@ba?xXt)WPT6 z$SL9Zm=akCHGlD}EUhHJ)(ZJ1S}G~0O*-3PKK2;K%;*VXo&LB8lICNzylDdTnQ138 zsz>+Ez&Al2#5INFd8LbwP3}r>dpig1-1F2~#UA(P13pwBzv$L@V_+5^+R-82@I;XR zm*$r^d2RKKKIOjt#K&cJai&mSO)y?C3`&PVPr|??S?Au(+bX9NP#)|X*`eT`Lm~;BD#jByyBFo`{#_8-{`Z0F|BQH znzc~cBi*An?aGuv8&gVxu$%0$9YxMj6^nSwkrx^BE|=6yVgXLTk(`&hBTBfWmHLXi zRuEMzxXJI6RQgmEoOATMV2ysi0r?JWKk(!R{(oqHUi<#H*qha%7OVZiHWrt8pE!3X zdkog)ntKVG*7=rb-bJ?FO_LOg`Q#%W_XkO&O^~$AT zlsG9hx{oo2jy%~p@BYl^eAqcc<&%sUi%;^=sHGJ1wwTRBU(sQFD39#eev6OxVhIaHjGg&I8mX^a3BSHW6jFk*AKIAKZx@_ja-jDieS% zQj)$&)<3@MA8Y{aoz1RtJKBxc57rMr`Ig6MFI)SOY)2#T9e9{PV(~)*1Ab^_urJLq z2G+}3_Q!M3$A6EX9^*Ra{MBtqZmMGGReyUY(J~OQ51GbXmWC!<_28vtP8P}h7*Cyx z%=r|l{zM;wtK6sB`yTuc@8wOa46z%MO8)kq6Vjn=EYI=N+HJ4qMayW_*ti1P0em)^ zVE)I4A=bW2O+o&NM-KMxF@~}@)sZ;IrH;7Ukf)y?IZ0>x_~grMNvFKu@^Ob441d~E z!{>EuIW=fw1lwZCNzNByzzZbhoTL*1D#kO;MHgqp7)hWgKyl_nSxG?t;KT07mn(fG zF%NYvG}ZuWmY9?2f%Zk(7F-uI))^m`aVo6sIdBLe-RrL^7ob|5lRcy0G6KdI3)NUz zq2u7-`4-+UX z@30wgm=5b2#TT)wbL;}=s20_P{%~H-vB4pS$dtati!W{3E%FdCFVXJDOP<}2-_u8F zd;s6Cq?a$^3;AH@+{B7MdE2sq>ub^EvSIKiGf=#qqTLl zGb6ALGR2<)E-&+R2A?O3*)Y!^Bma1AEx*&@Cp^N&3I7ET-52SRo7ADLg|WgD&J#b8 zQ+_q_C(nENBL6nSwJHW4(tnpZs17z>Rjy!>&XLbyxqzL-+`@qH+ynDzxrdxQ?0<_z zRr&$cGK9L4A}zKtah4)=Gte>CgIN>O5GWo@c*ZR*_~jVu*Tsr{G&ND}ZLc|UcuV?c z9Lq>GdMU&@ZDiaRD?e=)U^xl-gj%qggT)OWTa*tR<%9f(DCztyx5+Q$!p^w;OQlrLP*o2tL%13`n zDV3_Xm$lBHDRLQ99FkO~MD6i4q3Oh1FV`DlctA59(DeSLFvXV#x#v+w7CQ_+a=4vc zeL>b>-JM{_U*qJ+SM5c|Ef3}voCQxMru<)-ShuIex`sY}KY;W1 zm&aT{%RPUKtK!19ajCTROq74jyS=CbfVM(;^hM9M=lHkUC{4~~Rz6O*ZUBnMImC~i zmYDYwLbZPc$DP{xQ(jw!#3aPnxoi=K#koW>sO42&n-b@l=+FVpInA8C+?RELDnWD4 zPr;`sGqnr?8v0Vd2@bf&%0Q$_(zF$8Ps%9O9_W9#i1d29R%%0)&V_0`oS%gu4s3FQ z^#^U9$8YmH_%-oizy+yM%&~9&pdN{ z zRSST10d+~gUZa$qR(vetW_lsU=rN1@A`zAF{j z`Xr|Cx7rt#Lz5Kkh_)7F|HadWSzCb=Umkzr2T@KYMyz1hBA!^Fzsijp{4+oN$dw%G zPz<;Mc^Jnvyn2N9lIj&-a5RFZoa`T*Uhb z`U}T+|A}jSQTwR;f-`T!_T?F>i&$tq?UC<~dNp9~bCBf^RJW37j&*P5%xGI@YD#}{ z3Dx@BXRkR?`HZDXB2EsflspB@S!cwUN}70vdWu#>M2P(0q)92yv9JxsH<5=y4%++1 zAU*b*4*3Ai4<Y}|m$9S_nxu9DBC3KEi%AMb-p^o&6TMZ%?muAKD6h?^Gvo>V z5bEIXzj(^dTW@3P2kT=Dxy2`6`cIb@FA^3i9t`5FdZ_boDIZ;Wf%m))7(uZm7d2jz zZ--HMl$@4xP?ldYrG~px5Z{+pFA^nx{n9>xhdotx-(&vSrCcgc`|ZyT;FOnR z>l2=NU5}cH>0>*5(L8mw!S32g#!-XuKb4JWZrFh8f#igpEqu2|tbJ%ay&e4fH!2ng$C=0`mB zjD`Hf6zg*LNaPfhW-mIQaWe@szGp(oThTxEn%&HcTmSK%PyYI4wE%UEZ!81rQrkD4%*Ki?BY5|Wk#!f z?xk`H26;~k|BJ!IL!RF+PR6s*>+f~{ioewTBg+JuFxGpluQG<`m@G{2fu(dktgn#9 z)4$7a%>OO>b3M08EkEZ{2c1si8up??PfHi9f(K8_FQ4*)r?Ql3lhO_crWgeAs^L7B z|1c6794E^!F*rZo&$|6fp8bVQ{crLMSE=(S$NsZ@k(V_w5->>nS`#(o#Yi*MU*8xg zur4Ew@~xi&8|)_J0iX{lQG3gviHFA@tuN#nmdDz+ z#g1S-i|&$pJK*`pdb0zM{G#s6U-Q_~rhFvjlwo~IBEJ$^etJqXo8R=23#=JIRYR%L zg;0*f{B{p?09ezE7goX&y6@f8KpL0qx4^oWs?;+K=LYu80F_TCcf{ zedZqC5cgEtKi0*T#N0MaYs?|V`e;4{J;nWsd|P;$U>**$a^}RpJ>x8LoK(`*fr+hM zYR`Gbt9*AeV^Dldh>Urc<1!Kxf8C9~T>{6vI9sC~_m1XHUio){DF~@oS9~EIuzvY| z?pgg51L6YkgmHFw$NC6sg>~z@D9M}zhPw4n&|$4%^;0ty&u{wAdUY4}7#m`aLo6<_ zx!1O~#7+EDXJc#cV=OOcA?JUo6?(d__EUUfD^Zxcfpv6WuYRk0XG*N=f4T7bd>3_( zzUYgT*mwfp33tB=ke`fuE-O~`t6FizYre%iWSgUWRTvjb2YNQwMi-PQGC(M$#@8B#k2q=y2pQg?3IJV#2CCvhgQ{^(!QDpasLa zQlJt#JRRhx{sNExNA(x}e{Fq*j{kZ6FZC5x&o}y_zJgs_#G{(zT@AT-q*Du1{d{h7 zGdj*sHFqd!o-Jm5kyD#oaNd8*v-`b{!cb?&^5Jd0gnK-Xu|Dz@w!iZgj(g{a|C_}F z?^Yi<@Z0kD`XvLe;Z&1o7lK?6RpdU6U!!W{{aL5vz6Y&v9ILx;f7E}M_|)i?FF}XP zosx8hoY{ZWCy`F#8P3$9kd-?nrm_6eC*-W_W5d}f_Kdf 
diff --git a/examples/server/webui/index.html b/examples/server/webui/index.html
index dcdd41079..86a79b77f 100644
--- a/examples/server/webui/index.html
+++ b/examples/server/webui/index.html
@@ -62,53 +62,57 @@

From 1204f9727005974587d6fc1dcd4d4f0ead87c856 Mon Sep 17 00:00:00 2001
From: Tei Home
Date: Thu, 9 Jan 2025 19:32:06 +0800
Subject: [PATCH 81/81] doc: add cuda guide for fedora (#11135)

Since NVIDIA does not release CUDA for in-maintenance versions of Fedora,
the process of setting up the CUDA toolkit on Fedora has become quite
involved. This guide should help mere mortals install CUDA for development
in a Fedora 39 toolbox environment, without affecting the host system.
---
 docs/build.md       |   2 +
 docs/cuda-fedora.md | 317 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 319 insertions(+)
 create mode 100644 docs/cuda-fedora.md

diff --git a/docs/build.md b/docs/build.md
index 84019b204..3b0d2211d 100644
--- a/docs/build.md
+++ b/docs/build.md
@@ -127,6 +127,8 @@ For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md).
 
 This provides GPU acceleration using an NVIDIA GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from the [NVIDIA developer site](https://developer.nvidia.com/cuda-downloads).
 
+If you are using Fedora (using Fedora Workstation, or an 'Atomic' variant such as Silverblue), or would like to set up CUDA in a toolbox, please consider our [Fedora CUDA guide](./cuda-fedora.md). Unfortunately, the process is not as simple as one might expect.
+
 - Using `CMake`:
 
   ```bash
diff --git a/docs/cuda-fedora.md b/docs/cuda-fedora.md
new file mode 100644
index 000000000..b993386c8
--- /dev/null
+++ b/docs/cuda-fedora.md
@@ -0,0 +1,317 @@
+# Setting Up CUDA on Fedora
+
+In this guide we set up [Nvidia CUDA](https://docs.nvidia.com/cuda/) in a toolbox container. This guide is applicable for:
+- [Fedora Workstation](https://fedoraproject.org/workstation/)
+- [Atomic Desktops for Fedora](https://fedoraproject.org/atomic-desktops/)
+- [Fedora Spins](https://fedoraproject.org/spins)
+- [Other Distributions](https://containertoolbx.org/distros/), including `Red Hat Enterprise Linux >= 8`, `Arch Linux`, and `Ubuntu`.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Monitoring NVIDIA CUDA Repositories](#monitoring-nvidia-cuda-repositories)
+- [Using the Fedora 39 CUDA Repository](#using-the-fedora-39-cuda-repository)
+- [Creating a Fedora Toolbox Environment](#creating-a-fedora-toolbox-environment)
+- [Installing Essential Development Tools](#installing-essential-development-tools)
+- [Adding the CUDA Repository](#adding-the-cuda-repository)
+- [Installing `nvidia-driver-libs`](#installing-nvidia-driver-libs)
+- [Manually Resolving Package Conflicts](#manually-resolving-package-conflicts)
+- [Finalizing the Installation of `nvidia-driver-libs`](#finalizing-the-installation-of-nvidia-driver-libs)
+- [Installing the CUDA Meta-Package](#installing-the-cuda-meta-package)
+- [Configuring the Environment](#configuring-the-environment)
+- [Verifying the Installation](#verifying-the-installation)
+- [Conclusion](#conclusion)
+- [Troubleshooting](#troubleshooting)
+- [Additional Notes](#additional-notes)
+- [References](#references)
+
+## Prerequisites
+
+- **Toolbox Installed on the Host System.** `Fedora Silverblue` and `Fedora Workstation` both have toolbox by default; other distributions may need to install the [toolbox package](https://containertoolbx.org/install/).
+- **NVIDIA Drivers and Graphics Card installed on Host System (optional).** To run a CUDA program such as `llama.cpp`, the host should be set up to access your NVIDIA hardware. Fedora hosts can use the [RPM Fusion Repository](https://rpmfusion.org/Howto/NVIDIA).
+- **Internet connectivity** to download packages.
+
+### Monitoring NVIDIA CUDA Repositories
+
+Before proceeding, it is advisable to check if NVIDIA has updated their CUDA repositories for your Fedora version. NVIDIA's repositories can be found at:
+
+- [Fedora 40 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora40/x86_64/)
+- [Fedora 41 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora41/x86_64/)
+
+As of the latest update, these repositories do not contain the `cuda` meta-package or are missing essential components.
+
+### Using the Fedora 39 CUDA Repository
+
+Since the newer repositories are incomplete, we'll use the Fedora 39 repository:
+
+- [Fedora 39 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora39/x86_64/)
+
+**Note:** Fedora 39 is no longer maintained, so we recommend using a toolbox environment to prevent system conflicts.
+
+## Creating a Fedora Toolbox Environment
+
+This guide focuses on Fedora hosts, but with small adjustments, it can work for other hosts. Using a Fedora 39 toolbox allows us to install the necessary packages without affecting the host system.
+
+**Note:** Toolbox is available for other systems, and even without Toolbox, it is possible to use Podman or Docker.
+
+We do not recommend installing on the host system, as Fedora 39 is out of maintenance; instead, upgrade your host to a maintained version of Fedora.
+
+1. **Create a Fedora 39 Toolbox:**
+
+   ```bash
+   toolbox create --image registry.fedoraproject.org/fedora-toolbox:39 --container fedora-toolbox-39-cuda
+   ```
+
+2. **Enter the Toolbox:**
+
+   ```bash
+   toolbox enter --container fedora-toolbox-39-cuda
+   ```
+
+   Inside the toolbox, you have root privileges and can install packages without affecting the host system.
+
+## Installing Essential Development Tools
+
+1. **Synchronize the DNF Package Manager:**
+
+   ```bash
+   sudo dnf distro-sync
+   ```
+
+2. **Install the Default Text Editor (Optional):**
+
+   ```bash
+   sudo dnf install vim-default-editor --allowerasing
+   ```
+
+   The `--allowerasing` flag resolves any package conflicts.
+
+3. **Install Development Tools and Libraries:**
+
+   ```bash
+   sudo dnf install @c-development @development-tools cmake
+   ```
+
+   This installs essential packages for compiling software, including `gcc`, `make`, and other development headers.
+
+## Adding the CUDA Repository
+
+Add the NVIDIA CUDA repository to your DNF configuration:
+
+```bash
+sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/fedora39/x86_64/cuda-fedora39.repo
+```
+
+After adding the repository, synchronize the package manager again:
+
+```bash
+sudo dnf distro-sync
+```
+
+## Installing `nvidia-driver-libs`
+
+Attempt to install `nvidia-driver-libs`:
+
+```bash
+sudo dnf install nvidia-driver-libs
+```
+
+**Explanation:**
+
+- `nvidia-driver-libs` contains necessary NVIDIA driver libraries required by CUDA.
+- This step might fail due to conflicts with existing NVIDIA drivers on the host system.
+
+## Manually Resolving Package Conflicts
+
+If the installation fails due to conflicts, we'll manually download and install the required packages, excluding conflicting files.
+
+### 1. Download the `nvidia-driver-libs` RPM
+
+```bash
+sudo dnf download --arch x86_64 nvidia-driver-libs
+```
+
+You should see a file similar to:
+
+```
+nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm
+```
+
+### 2. Attempt to Install the RPM
+
+```bash
+sudo dnf install nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm
+```
+
+**Expected Error:**
+
+Installation may fail with errors pointing to conflicts with `egl-gbm` and `egl-wayland`.
+
+**Note: It is important to carefully read the error messages to identify the exact paths that need to be excluded.**
+
+### 3. Download Dependencies
+
+```bash
+sudo dnf download --arch x86_64 egl-gbm egl-wayland
+```
+
+### 4. Install `egl-gbm` with Excluded Paths
+
+Exclude conflicting files during installation:
+
+```bash
+sudo rpm --install --verbose --hash \
+  --excludepath=/usr/lib64/libnvidia-egl-gbm.so.1.1.2 \
+  --excludepath=/usr/share/egl/egl_external_platform.d/15_nvidia_gbm.json \
+  egl-gbm-1.1.2^20240919gitb24587d-3.fc39.x86_64.rpm
+```
+
+**Explanation:**
+
+- The `--excludepath` option skips installing files that conflict with existing files.
+- Adjust the paths based on the error messages you receive.
+
+### 5. Install `egl-wayland` with Excluded Paths
+
+```bash
+sudo rpm --install --verbose --hash \
+  --excludepath=/usr/share/egl/egl_external_platform.d/10_nvidia_wayland.json \
+  egl-wayland-1.1.17^20241118giteeb29e1-5.fc39.x86_64.rpm
+```
+
+### 6. Install `nvidia-driver-libs` with Excluded Paths
+
+```bash
+sudo rpm --install --verbose --hash \
+  --excludepath=/usr/share/glvnd/egl_vendor.d/10_nvidia.json \
+  --excludepath=/usr/share/nvidia/nvoptix.bin \
+  nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm
+```
+
+**Note:**
+
+- Replace the paths with the ones causing conflicts in your installation if they differ.
+- The `--verbose` and `--hash` options provide detailed output during installation.
+
+## Finalizing the Installation of `nvidia-driver-libs`
+
+After manually installing the dependencies, run:
+
+```bash
+sudo dnf install nvidia-driver-libs
+```
+
+You should receive a message indicating the package is already installed:
+
+```
+Package nvidia-driver-libs-3:560.35.05-1.fc39.x86_64 is already installed.
+Dependencies resolved.
+Nothing to do.
+Complete!
+```
+
+## Installing the CUDA Meta-Package
+
+Now that the driver libraries are installed, proceed to install CUDA:
+
+```bash
+sudo dnf install cuda
+```
+
+This installs the CUDA toolkit and associated packages.
+
+## Configuring the Environment
+
+To use CUDA, add its binary directory to your system's `PATH`.
+
+1. **Create a Profile Script:**
+
+   ```bash
+   sudo sh -c 'echo "export PATH=\$PATH:/usr/local/cuda/bin" >> /etc/profile.d/cuda.sh'
+   ```
+
+   **Explanation:**
+
+   - We add to `/etc/profile.d/` as the `/etc/` folder is unique to this particular container, and is not shared with other containers or the host system.
+   - The backslash `\` before `$PATH` ensures the variable is correctly written into the script.
+
+2. **Make the Script Executable:**
+
+   ```bash
+   sudo chmod +x /etc/profile.d/cuda.sh
+   ```
+
+3. **Source the Script to Update Your Environment:**
+
+   ```bash
+   source /etc/profile.d/cuda.sh
+   ```
+
+   **Note:** This command updates your current shell session with the new `PATH`. The `/etc/profile.d/cuda.sh` script ensures that the CUDA binaries are available in your `PATH` for all future sessions.
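+
+For reference, after step 1 above `/etc/profile.d/cuda.sh` is a one-line script; a minimal sketch of its contents is shown below. The commented-out `LD_LIBRARY_PATH` line is an optional assumption, not part of the steps above, and is only needed if applications cannot locate the CUDA runtime libraries at run time.
+
+```bash
+# /etc/profile.d/cuda.sh, as written by the command in step 1 above
+export PATH=$PATH:/usr/local/cuda/bin
+# Optional (assumption): also expose the CUDA libraries to the dynamic linker.
+# export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}/usr/local/cuda/lib64
+```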
+
+## Verifying the Installation
+
+To confirm that CUDA is correctly installed and configured, check the version of the NVIDIA CUDA Compiler (`nvcc`):
+
+```bash
+nvcc --version
+```
+
+You should see output similar to:
+
+```
+nvcc: NVIDIA (R) Cuda compiler driver
+Copyright (c) 2005-2024 NVIDIA Corporation
+Built on Tue_Oct_29_23:50:19_PDT_2024
+Cuda compilation tools, release 12.6, V12.6.85
+Build cuda_12.6.r12.6/compiler.35059454_0
+```
+
+This output confirms that the CUDA compiler is accessible and indicates the installed version.
+
+## Conclusion
+
+You have successfully set up CUDA on Fedora within a toolbox environment using the Fedora 39 CUDA repository. By manually resolving package conflicts and configuring the environment, you can develop CUDA applications without affecting your host system.
+
+## Troubleshooting
+
+- **Installation Failures:**
+  - If you encounter errors during installation, carefully read the error messages. They often indicate conflicting files or missing dependencies.
+  - Use the `--excludepath` option with `rpm` to exclude conflicting files during manual installations.
+
+- **Driver Conflicts:**
+  - Since the host system may already have NVIDIA drivers installed, conflicts can arise. Using the toolbox environment helps isolate these issues.
+
+- **Environment Variables Not Set:**
+  - If `nvcc` is not found after installation, ensure that `/usr/local/cuda/bin` is in your `PATH`.
+  - Run `echo $PATH` to check if the path is included.
+  - Re-source the profile script or open a new terminal session.
+
+## Additional Notes
+
+- **Updating CUDA in the Future:**
+  - Keep an eye on the official NVIDIA repositories for updates to your Fedora version.
+  - When an updated repository becomes available, adjust your `dnf` configuration accordingly.
+
+- **Building `llama.cpp`:**
+  - With CUDA installed, you can follow these [build instructions for `llama.cpp`](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) to compile it with CUDA support; a short build sketch is included in the appendix at the end of this document.
+  - Ensure that any CUDA-specific build flags or paths are correctly set in your build configuration.
+
+- **Using the Toolbox Environment:**
+  - The toolbox environment is isolated from your host system, which helps prevent conflicts.
+  - Remember that system files and configurations inside the toolbox are separate from the host. By default, the home directory of the user is shared between the host and the toolbox.
+
+---
+
+**Disclaimer:** Manually installing and modifying system packages can lead to instability of the container. The above steps are provided as a guideline and may need adjustments based on your specific system configuration. Always back up important data before making significant system changes, especially as your home folder is writable and shared with the toolbox.
+
+**Acknowledgments:** Special thanks to the Fedora community and NVIDIA documentation for providing resources that assisted in creating this guide.
+
+## References
+
+- [Fedora Toolbox Documentation](https://docs.fedoraproject.org/en-US/fedora-silverblue/toolbox/)
+- [NVIDIA CUDA Installation Guide](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html)
+- [Podman Documentation](https://podman.io/get-started)
+
+---
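+
+## Appendix: Building `llama.cpp` with CUDA (example)
+
+As a quick illustration of the "Building `llama.cpp`" note above, here is a minimal sketch of a CUDA-enabled build run inside the toolbox. It assumes the default CMake flow from the linked build instructions (`-DGGML_CUDA=ON`); flags, paths, and parallelism are only suggestions and may need adjusting for your setup.
+
+```bash
+# Run inside the fedora-toolbox-39-cuda container created earlier.
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp
+
+# Configure with the CUDA backend enabled, then build in Release mode.
+cmake -B build -DGGML_CUDA=ON
+cmake --build build --config Release -j "$(nproc)"
+```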

zL}AXx%OX9@h?Xu3;aig9m=;GpsUaXnI&ARjNR8e&C|m1u(s56+L z;!v}b)B1+K^uH5&fJq;Cb$RY*zvz0UKl#k-@A^f4y1@bc`0QQc%hnGd?B@6lHl>;W zuU;2#aXcS84JEEbM!@HIACUHee*pF%vWb|>6>BgZb~PhD`m;I&Vw$NuB14z{x=!0K z9y2jDjRprdV&tSzQW8+PG{=ccj~L@-)J&cb=S;Ozhek3&vJmx${o41+*`q0;N_l~G z%mlHe465>u>i4?{eqReEcp{V=&|2^?)Wfq!N1T+pp#8xEt&ev^e^*Cgjam{hxmDtG{rnfSNDY^ACF>vhyv+`xRe7Y!;wU zlO=E!8@(||B!kh(t#eI?SH)`dMxC|AqasXfaD+2@2-I;o7lh9(_;z5F5t9OhJLVM} zO?Xns?Za4eiJ z_b%9~uJ^14xW-{=`vMLW^EqEo|bw82K(XIBgh$rgG*~QrdqoEbliw=w8|3L zPqb>hr6+a%F=mJbR07KUE0vIYks45a;4K$~6Ypfxz8(dne;B#UBpkH*esy2v+sR*` zel9f@7m!T|;XY}+3RLXN-B)&(S(7*kn6d&ElAd7MJ~+H>GPXXKhC~1Puo$1>|`Wql_3_W9GZ<`ka^TFVoNK%8m$ExRN)TG&a zLsIKW?2HmRiwDTrf7&R?cQv?0vRp4D=f(2lrC1;Kf5c3Bb~$QeY!%Z6kE9qMu3cA| z&b$3cK%4dKm1nf!n)t2=(T`2d3WvY7JVn-j*W(wcj<@xwKYjC@>smWsF!Jrba`XD> z#}};8NkLC!i_GhxxD#orZR}H~i_G_)hld5%>1nOdQ>O5m9oi~l7!sljaEWv zgp1GpP91Q~(_8%?H5=u~({L2}WI=qR!{EyL_U9kXi{M7P=%Qnkwwet4UwTCE^Bw3a z;pRZD+C{5*C5&8LbQx~+h{)kV2|-?}H+8vh)G@;E+<)bfCL?__`GO{8Xgx?svmbOi zVdo4W_dWEe0p+}%i1fI|pC!>SSqUr#ZSHsd!hG>UPMMnSTHZy!Pqf>IFRMvW`RStb zAtS8k;a(Tuhlc1MsH4-kA`r?zz`nOJlPN2BIQh2o{%hi~5No5;eOo7~zy)^zYYbuW z9?<{crhkXi?1vZ48q8~&{yp~doemdLUVA*v^OmhP=%vro#!84&Yn}z9GKI)`WU6vS zXw4xWZQMKUh{B{qSJ5P{QYh?KSO1IaLM!DbARo40BFT-hrX0UobAL{%MT7rCbE1t+h6be}8V%l|`0XXr zX=+HzE2Seyb^>)^Vi*pyL9WDHJ}K9Ky&tTyRLnD2R}}Vd)nX4b*d8BOOFrcN$JMJ? z&R<2=*7JxVeeFI&+k^Z2&(nMi^}1TW1`AHoa+Z*@r`;uJ?_nwK4Kwm_I(!X-qRn69 z?|&yBo(u~VyxyFX33|F>{TFrrf8$gy_3s>-y+%);SiwpEC1;fIkSOuKQhev>#6^t2 zYY1bacpN&41cxd`o^%lU0Z~5bbHgb5u;(A16CUQoRGbVmf`9wot=Ecm=~FhE=NCPtTOWCvze4D|FRIVA zjP#Z(y>RsIB@Fp+R#kx@=y`pJnRyz_$tWh=CMZism*4C{Gh(b<4_h3F;Mw9ZO$YA{`>-+JnszQkDceo?d zpn&nXT!%{KBv1o4)Fb6vmZBT$g@1;=otOiSOaF_9;UR6W8N+iPyoA17hT5HM*BeUm z7n-J@lj3N&DRvIzU%?PuF@UG354{Ccl-2h!1=(YjHHC9>5buhra^#9R{}S2{S4PvB zzE`*&0<87DC-kJ6yksY!i(60G9p=_>@k&s6+MdBSCB|RKu<^&DZ_s}_7Jq>GE}l@s0mxnwt*a>W;*M#zZ?h^dH+Iy6N- zrh3uq#p^1FDg|0A?0Z9LQKqE_I`{gyaU0Qf<@Q!b?==Z6dhKWSp?~`x4Nk4aiEiZr z;w-wZ7oE4h)@A;x6b~I;5qh2np;lvB{=9wFnTpn@b#O}D8ho!dO8@r3`;rCeO~c^a+coNlQe<98G9 zcgS5JP25+@l!7F58CEa+(0otWndK?8jW#xm2fHi*PTA17%zt|H2ua%SS*5N|lRI*Z z3F-Pt6Xn@(%$q)Z$1h4R8fRC&=#PKLMT^U7^>ni<5Bs9Wd+qma{nMFdYoe-YKx|2& zN&+yHRk@L2^*#p8yn6I9{>wG?QnJFELt6*=K?)8>*+VPC%>Mp-G3Q$iB)duAC1?=t0KoRtPCMB zY^XkHp))Cz;S+IuV@0MXDj`BuOR7dI*m94}y(x#}Jl`7a7NfVwATHl3H+8(P+Gmoi!x=eD^wYZNU`5?p|JH}dKy^pa}F|1L6_Gi&(H@21$IUTq( zhYc%C>KJQ~ASP5N(j~}~&o|@9zMFEcVV=*$w`F?8F>=T~nFDtws0COj4%bA^XKHt)j15_}@$e5mtBZ|paEiD5$F|3F@p=o2i@)Dr z`98mOzWpCO$&bh65Kh@*HBc)jUO&fC@+#vXMDI#ac|bq~D{J)PY?td<;%|u=IFl2f z&^j6O^ohBovKIV!@Je}36z2BaWlC{2|9_mh#o?T>0h1;VCMkpzEXwF$wuHBDsjH73QzmNa9a__8q67)UBmcge@LGGczTY>POId)xf4-y6>aWd&#d0nXV2Z2h9GABcrO zO!_=0OT))l8^t~D=QZqcTT6j?T0MH3Y;#*hK$dkj@5eyCv9_W!TpNrSEtlrM2o-Jl9h30OFA#{lQ8Y3w!m68zt}lRz12(xT47J7#TnB!S@(J ztqOhw<)_pb4@!QXo{izqBd?Ij{&yDc359;%+~t8l+SDK_rE8-2hCRy z9z;~=2nIp&jyyWBJfa2e15uNAyscJy?p0<+$yu9|VCn};D7KV-^Cvi!zSzhW&Ai9Z;6V%QGa^sf9Dwqk3QGg zJhcWRUc*Bg4;Dg8?GRZ^%K^mUk%F?6t2uMF?*LywpuZ0?^d~>}AR{-r7ppdZ(EW!a zbCov%YGJ&K`_}_(v(rEING`ZYzi#&l8Fkp?S2`*bPvyzj&V$W)u{wQ%PI~a4Yu-pd8SvLr(gW<{M_ANaq$S_O|A0gylmcI>u;G55&6d{ z`TD}=ef&^Tgr2jivw#$1Z?0agBla%7A79hrDJrPcL5SF}EOt)n;4^;j7;fY5!u(Z! 
zqgTJ^KbI=oBmX*zwpxoYJPVO^Yc+Zee8?l1l0XJx%0EHG4`tF_210FTGVaNXbRjG zVM0F*Vo-)Mi>fbIF9&I2hp0o-^=D8|u$_~IeVLO@f2uF5uHOr6Fn7TFZ0bkf+RK-X z#0VaLcMxOlc=ainH(GG*dfc|0+O9{X$Nm@^FT452%jqZ#$G-1`7Y*jg+c}{fZMBQo zhoF8z&i4YnA}W%k5!V3^GYJS9HeT-J)r&9vDOgX-xqzQqkW#6D1*u^v!ZUQWphfSo zfLUlEUQ$LRSv$1$Q>q*#ilvNzdpB{%2;&ZaSD_C&AWtgf=fB1kB;sKgGx31^C5|dK zH6V6(ia16$%0B{@J-J#$qIm9o_a6ZcFc%x^p@{`Udp|xW!&zaIw8+wa%9n&FGFAK@ z*DDLL&g4D~m92!{F2-6DR=~We18PFA>y#WD{lo_U8*;?r{Sl*f_=Vyouf=h)G(&Mwcxf3?EINsJJvA;Bbwqa)wrPkrjxI<3-)iZuqU(Q3Ryhp4tlhA$o zssVAAQjlbfzGJGcmi=`W5uK^-vr=bs_kd+i%>Opvk6^Ds7wNF?xJ#fBgK&k><-!4u4V(nt3$HF${&>M5^^;rEKEpP4Dh>@d0@np?|;&zyk5y=o8cWLj9oudz=48 z*`@d5N(H*{&kOUpnhdpw{YkZdsv1A7kEK-Ze3`zJio%EGzss+WbgMj2UcbN1h!`Ix zuo)6i-~W6+73I4-Pb}fNcko^*F)sul`Qv*_tW!S%PcCu8`4`7W-)N0!e$iiV@L43v zFBq9&a7tbq{BNBvuAFS`?@4j#Bd!0__|edMx?frLa&mrqv%VjetAbB|rRBL>;D#t} zrt{s~O68e6`IGngK6I+5!;64%?|ti`-{<=hCSI9Lwhxwqau-`gMkGVkaA~Hm+h{kh z7LK)Cz3g_*)<3%VIV-2C)nM?V>gV34vkwV@@2Sb#y!Xzq8F)639DO0q|6(4K+uj+* zf5JfWBT>fwdTjkU^xzVZx8dA z?HfCvd%jX?GFXqp4^KArghmJI*E-z-9sS?qUUohzc)sCS>W=4Fc7A6TeL9fjOz)jH z^km|zVZ_PcHC^c?=yCt!ZJNG64CCGVckgQ(!t7P{g46z=@1;0@g&*(3v>Nv?Hy-(C z{-h%NZPwv|Fy4Z_g?O|jDtF2<53f2@5r2XD>V=XJvwY#^(0Nd~@1Sf-Ldl;t38i69 z%P>pGavXxH7&Uo9VH{ODNv^t}`v&)hIOko1|2!uJ)~RHiG@3uKw|!3^NWaK4N4W=3 zhgU|brMREQwA7P->fWC_s`VEGtdAic(g1kv+0U-tT1~w{4yW2b-}q{gJ7q-|ObRpl z&pI*2=exJCS_;E!h&l2)CcNRacX3|(ya?iEJ~hN$pXch=C0pHI)a@SBr!k&!wso4$*WDa~+ z4pT<-)c7rbJ6Xe}45>OOJkQ54&frO5mSA`5>JNRi>R{yds*Sdv^p8bAuXVTPc{+Gr zml^gih8=Ep(CAB%EWPwFnz2KDmKG;|=?Arby;ls58V09JynERD>7COP}TK}3&e+i?Ux&SWF9DD`f#cE3c{gC9M+h;1T^{Q^pN*-VI zX|hF7s&+1!1o3<)k2M$uw5#v!6(^aIPc%$CqT=3rWql3AG6kK@DQXibpS-X6Db^w* z#uq|=Z$~@qMb5|deDfWgfs=E5%)q!}QhYx6F(<+_x(&rkXnogO5-0u+&PR(dC;!Cz ziN5<=m?i8JaY59F;Ck-=wN}gH|L%44Tllkf`!{-ef7Q)9=YP@7gWUh?YtPkRU+eS% z-*hY>1@r}3V}AFy^S4a?-D`i&bK@R-DzEo{<()?4fkF9m5BYcR#I5W1+RCkr)m8J) zFa1B(iH{V#9T^|k@@5?PKa9ga)zbgzbshap4gL9Vp1X1Se=|Oe|3$NK+w1?U*N`X3 zXy4@M5Ag*gYjGUS8JC|C_lmjHwZ?Fo^)LsZU~@25Bx?P__E@)Bem+lODa`bNp&zGz z$y2V|^MG$gF2s$0U5K%JGhW6I|G;yTS$Fn4J7ysu?QaUnZN$dq6*=i2jCPY>_>(>b z_Y-j|498(B7d-B2PbdG?t6ksI=Vi0E*IdAtz3e-qt|pX(@QC`95cbiF9`Cz>`a+kZ z_!o?8$PJ%(+6cZGEx2A}peA3=rk-$roppYa?ilhz7blP-VTSA!l(%*A;1|aBcg?Yl z%`?QMpdRDhZ2Y{KG5VpwJ7+S<@*$)NUE>|WwoYbtHe81Wf@DcjO6G4cp#{bjT0g*(W@z_?yb@8OsMHx0}2uB$mm{ECCQ)R9~M zi-))v#5=|7XQ6#^%14|M*>I=!np5%3_lr84qek!B$M_8M=!BSHiu$IpMM9GW6c$fw zP!7R*VCp~ZN`DI~^ZVZ)y$9fbjY~{(E)+JN3lR|0*28Q5H?{37mWb;vRpX^Kt24<6 z>Z~eKE7Yi1^b}OoQQf{CcM# zUN*f~BUfM5+CAw%cg z^S6NKPc-B_GJ2-AqlDB_f|io&JegT?CJSDG;nVfW{^Dvq3AZVD_2@1vAaE)#n+Sd% zi6GgiinRC!w|{Sc{1M`%#+pojaH8l_ajVyR<3n&AzwF27pus~nnw1MzH+~(9>vL$e zAP@)AapJPQx3>l%IVKtNU8jVnh*egiq(2LNJx<|$68awYpZUppOXxxKv*p8w9x32r z%&U>l^qQ)}Ca~=Nb98*=wcVdXB?3si@a#)?OKK|awqaV)Hzu?U7MnJti~ zE)L5>Py+UUcYVp0Z-tRT%Ns&%zzN+bBR%lPjrV{O4{z$g$@|(S>ZqUJ5<%WMJu81P zO3qqs^%yz{nmi&CXf2T>BEJb3$_3)r2A!gv8SCdN-&I0%vrN5t@{ z|27yAHFt5{NL)7qTsNe_rru(xx;cdPfM$L8+us_0{{P1sOUXrB?gr-ui5G$XG)d|i zYDDND_qVS0Q2Qcu{QbD{7jj?8zgcTS#mBAK-p`|Rt_0KXut**<82RCxY(kyPGj>sx$MoT8O6%b>9*(5kUtaDVl@ZlKtk6fVf2iC zVFgYB5hqhTIjZ6}7dJV2*q{dwiGza?@Agwh$jnjGHvv_fMP@YO({` z_%36gru$BCo@v?evs?cCWo*>8t@-4yP42FLxg||DgCaQv=e!T4qztvul^AYR;x{_w zg_@B@vzoJeBHP!{FC!Y+OT)=>sObRYjOw`82Yxp-_&4I7-aG*6p`yRZmk!=3NS}bS zgO%d&ZTEIX_UtcLb3f+I`7`T}Hn6biK$ACclO|4>Xv83P^QqJKyUwrHkEe_2la?!& zkkSYrf8y{nKDXTEans|vOAtqbTxj%IF_t76pHD&=SezH&P?Jy!Rmm#skxGo}95B+Q zaJ*c1`?KKTuip_+Ktds7pceu{3xUy|)c2$`Si7(%0&*KnCJ|SHJd7zj3gjH^l=i5q zgi5gQ$6d(}j{;CfM6y!%JpQ6 zocYQF)Z{p2I|7ck)1y$%9ufBxl2g;)=wlwU(U`~NSA8_7RdlNt7sPFEAJWl8_zbu< 
z2~|OnIk51H1EFg2#o_4nx19F3*_eeqp)b#Ge5&tWVvC9ncTEg$cu~mt_9C0#;Pw_m zN8M<#6G?!seJ`}f$S0;yFEfWM8B`G?Gw2O^!#O{$z*vfh{pf+PH>(&$Tw7j~TeLWL z=d4h7$3orW|3)vf@C$Xm!A)|nm-N#JD1Yx799NRldvIR(iQfs-sX^@ooy6-ji5g#P z)LAeIO&3W_kXxY-4nH5BoekdqY~A6x{wGb!i{2Gt9f@v)J9ht-#?}9z|3&Dgmwn%t zt2(>tISO~3F@2w{YlS|sad(du2j>OO$NxQ zs6A>U{twUYqI=^qKwXN%-|ODo;1s1wigu6eFfeta0d&PJ;`@Kq1M2q7j=KuaTJkUx z1P+_AI-vH+7z!Y%Zh4vg*!>TBI9x$`#SGT{kLTW}(H~MB1SvAXl=fnIyr0%~qnje; zUsj;x1}7k>EDTPg2BUJRk<)s#Vt+UW+9xiRa=lw>1f_~gWc*t;Tpp_FN1kbEhRo!Y2D+#4O%XPsG9cQo_o6=JS9EC|EeXk z{_HT*gqHp@`c;AAQ>VaZ)?usF@jIxci#~;uXWLaQ`y4Wl7Jq!?H#rHO z-~EU-F$$T1WB={^`k(Z4{>?m#_zr(=VxC5Srv1G?=I>uMncjR7sl%IVj>y?XJ+Xj7 zHTZOnX{)#N;ICp?QKbVCW)4w@Vr;g(&ptlGe={HX6h0HOGWO?Gq1K0_PMo~4?}vFa zmVY<*ixGm12`Tv9q`~m~7k}fRs(MB^>RzjoS8Ikbg3%r!rMK8TL>bje)8M7{Xb|K^Aa@)TRhAfF<7ec|5QAOaPYq3q-xu;?B zme=?14&g4<>U(1N+vGIDUS%kp<#dT2M&`gLr_uMsAkN8m$?V?h?|nwik+rY;6F7`g zswHJ%785;rI%|0XtYVq%G z8~H(kuR2U! z-K6&a;Kcla{iJy;f^F}fjb4aBU4D%`-vV*0oV}&96mzoQ_kXsnbtL1%r{dN#{6F^I ztk-#6SrGdxK|giF2_$RqaQh{s=GmGpy#yL;&5|`)LnHq^puN7e_w94;=?)rsi-Cou zautik8c13siy8Oe01g5EN>!)Jd63u-cf5#;%Wye9TYh<2!9V6r=sx#s;OPAwTK~QO zc@O^Y_fP{74u81Dq4aTRkI^b=oT-Uerm;*aIcOg+SadWewpg>`+2=Nb<#R?G{RKeb zz~_3g&%P_vQ)Hn0tPD^F3w}Q~Ks$3XV59ThZN?oyLNWfLl3+_%9~dS$P@HU95fmz} zSnLJfDqdQA03RrCdt|`3Cf>|Pi{59LppY4jlP;jDnSUTr*zA=u1E;dtq%$rg1myG*zYS>-7y>d`;JY}{y8*}c(>T|K@%F1>udF49SenDmDR@1pd zBjvElwJclU?a_Nef8(E(@8sc;pUt~wr8s}S1TfEeLBWF)tgb>Jh1dLLqRfY9YbpeC zj4yB*M1N*X)_h+?D@&c6(8aiWXx0gc@8lWg*dQ@omT$}?*-KtAq9{Fr0v3`lm1A*d z6iMonf-JO9!l^PVjH?W#$}Q|k*2mf?d`*5(O;E(OJi85T|NH+}CSm2>ZA1HkDbVZ+ zrskBedo5c_C~Y!W+k93zFo^}JtJyMp+o@3J?0@ZT7^gveNlW&o*~$9cMO!P)E!ve_ zn}VHn^i8{FxKY|@|LL*YgIRADf+!P9rz>(Rv(ykxs&q=-FfKI5)}#?h^ip=vXvWit z9ZO@c&QUq1#$~)P`zqh$%Z1qS1dZ)YFq+tSkecVnL?CL_#R6THTInumOaB-!F z*?)Qr$!VE+T-ls6It%EW&DlrGOnH~{NGr7XrMfp(ZV7f3R@NYkfJ;q_)`9ATo^5-TqpFGQhpZTQzu0T4KHd51fR{FMdw{O&wkslZva6AE=gpz7Cg9=2 zRiOoHt^wfZeoho0f@^PV`egV02*8Csfq#GYVXxHtj5B~j>&o9C%6@Stz)Igo7yrTl zz)>+%=&TB` zGYJFig}E(Y0_6p`0KpZ47X(ZH9c+GJ@P$9+Km6Ed2D2}J{Qv~Ohzo>2a1EG@)qgV3 zf*qCDH^3y!6@V5@CXg3HLh0sn1ji;2OxXq?V{;Ru+)l1T=1d2IYq{k>Whj2fkXEF#8dFq zAHriu-W$?CSAFCC6OSnVDfYL1Q?&v6m;Y3z`r-famp7vS!iM?B9)H{?em?s66Tkl) z{*mXmz2H|q;qUmr!SBnT{s6%Yf~^eIAN-5@kbSssy#C@jcF(@;VU|ySgY*#~r;*GL z{INf@N06cT)g*|aeId^aSWvzs$o0+$vz2#;5=K=tDr;+do;M*aX+AicCR40ZO1KU_e14W`IS%g+BDsZ%OoI zTufl>)8E*cVdYhybI=CJz{X+*fDt$+kVUIr()ID#-JeLzkbiwv6o1CC58KaFj{p4s zY-mI;kJwKR11&(+3ur2MNBxKbK)0aO!3!8ohC+A(uuotF-~hN4cp9TGstCrBAu=@u zw8=KWhyhKp9WY983fSHTf&{vZXFw=HJd}U4G$huU6BCzFJYR2NR zDXi2bMOi4J)VoE`sDdbHGo<;SH7w(u<-+v zkutY{o1{v)RG|B3r2-VvCe>1r6gNf&Ql%A^ElW~`7cL{`QezfTA)lm9D)K@xq!(9o zh0;l*S$}MdI!kY^WEicGW38TT|0*Y1ecr-MZt(WRJdZNA^tY{lQw)n?7*=8j6u3=c^z&kl6m9 zq&UkBc%j5LOAqi!iDDK`7(-I;XE7aQlDa>$>3`ajEBl1{g(2VK<9t2W?f$y<(oj790VqWhn;`I)tYZVaI(?6zRBi)jpIQsuT=)i%> zFnb6GJ$J8MQi0;~d4|51zWkH`j@ihDh{vKgAP)FC^sRXTg}<1x^CH%t@(-I3C=S39eIJ}N9td*i`&RD904m2il+FOSd|o5D zLlyW)m%dFb&|WgjW){eJdB`ve#D8_uSAi(u%EB-@BW&Dwi2vXx%Wzyv|TeE5zW%zQsufUjqk`fVpuY#U=z=t^%!DTL2n z@gM(ERrMcZfn}CV;@oTm+<^3Ko&hKOAhUw53~X3QD#*Sz1M$d0q~A<9L*RQAva9I} zUN{1+mI$8vBLi#yYn~%xA%8yL!{=8=%>p*J3|xytZC7JpeBz(cnQ_520CXP7A#7Ui z+g+~4;HIh^11f(B1)fu5V5*<`K{kRbYY)h0vTOvG(yW_><8w zADlJj2+t8;JkM+^fHLcO1v<6Hjwn0T04(L$EPyi>P?%r8Fp5M73&(W1bOtc6|ALYKBAbq19*pdNzxQKsGw}y%zZj^1`A6SD zbkF)QVxS033F@cC$&l5%ELWgQD-6TU$DatAdAcU9#eZ;#Um zvvD*fPuYk?u(uVgAAF>$9QVbK<7m1cyvS@T%s=>4RXOpCr+>!L%)a<-TN&$vA5=#B z%l>B|1l;8MjFzbbfOl)mB^0;JGD;h>aKcYq!Z5RsRv>L)9s$38`WTy7RC_5=*505& zIyDx#fvU%6rNo>TaF=4q=0i0GnF^SN-R2blnfLf&;R>CRIDcUaqz}AAZ)(f}or>V| 
z$@*MeWC2cQihqV#d5{gGG!DWhBVM_xazJ9nX=J2vG`S45PujK>6aclaQdN%q;@xpH z%@1BKwiVhRe5*ojZ1N%d z9eV)`R4^+AH;pWsv0sLfxuE`Q{q$cO_}6~M=()jYpMSAj;*v~KfngE`ipfG!Q7jjW zDZub!D*}{(lf@tVu+rarkTYQAF(csok>&ak%`n-SD&MzMLe|~IPEL9pRhXc#)C7W3EV##g{6#sSmnFVl2U&Nws z%gpb~G=H+$D6vasZU1_l7y)FFtxQ6r)ELm8J`@JE#88=a1^~q_C_K67UZR92sJ{-- zTnde&60U3p;2>|A$gU?ep6~;>f5uP-jfvj3Ow4J3Qx*o-hA`-6w2E|-I#l2JAIrFz zS|z2m>1b{tNVu}H05T((SY>0D0L?Mbxat)jl|GD_S7!OZ*KPAXjGt}zFT6TAmNXX zY=M(aBTJ3QS4WtX<^WhZ28`@}Z8ZYu`V5ub7!B!+`8!Th0WZgB%=$wc)FW_fJN}GI zG=p9g2zn`PKfdMv$8oL}S|+iZVldjLpMSk=|6Xo$1M`pVvp@w|AVmLEhkyAX1gPIv zs|fkIFv67ec@VpG9Z2jNsspzG%N*1g!hfWbVwe91I_6*TL*s;n#$z{Hcw7(Md@_A6 z%Wix%j}Cx5>R&l+9NBL82URg+~2Ve=v?_hvp-H1!`As<#0rX!jZumQ&BHA= z5X^D}9vk?dIV5KiT>OD~G=Kc(@e_P}GyIG%H%6m`jmso1D6KQ%jrvhO{HuM*IGW(3 zY^PNNoQw2jqd6Qq4wC)x#RZ}8>jSr}&l6DYk_{^9K$`{&-VW2`#sx zXY=#yex5B=mEh0!_UHNgz4N6{(6jrc|2%tN`p>ifrT;wNRTXWWgDk4sXMeW+^BZFu zr;)ElbebM;8lsC-6&>IBq1zB0fM2?09MOAi_d}O{=m7tryBFJz_H2CVF5{R1V4*pj z0MIl1K6B%kaX;Tn==tY+?L%)PTL|Wd{`>5H=)cdwhyMGVAUOcwK5^WA+GRox$r0H| z4gjnVU5DBQf9Qs9`}q%D{C{mf{Ly#)9T=H)dW;R957W z?Jvq1LPVbwpBVj=8952j?`S^!7rlw*e82Rm{X_5k(i0#4_V`!dA%1-Os+=J}WgZ{D z#)pyP5Iy~+pBJ-0v>oFA7cG+C`K7&lWcj7V5#Ph#W9EtG@hmlr9DkZiLhQr4m=_b- zNdjN`Gh`5LhUWCnA6ka_@cyOMkQ~3X`A6o<-}K1vAKu%KUL@2%0ql=n^6g9i)d?eg zc>mH?i1#mTfn;`mX;oE4BNF=ESu8N0H*^-m^0HDwzwx}9QO-aLW5CKhr2aN{oB(=P zrRBD-au5V`ID+3XZ+{ul5%fXp5U30sfVgrLmJH=%QU1WP8F(Q3!i=k&;qrNr&Hym> zqX|^hn}4pm2NGbmo<`?f0cWo=5G2gA5dW@k48c!WrFF zpIg4#Nf`hTa*Pxlt#3I1Ip3>8-)oMdnOP7>hB9Ojqx=O*#D9v;#Epwr!3=WN|K#gD z0e}E}mNpc$d6fC&$QnxQitcVe>l6!fLx)0jpv(wAWdce%f$op@J}`zI!H~f9iJh=L z`q}Vgl3*PRKqfI^-#IATS|?=;8&*m4I7@^0fbd?+5=x zt^w&WKzbDZkzW1v18_k4Q~^*>xyXb#DklMG|LEsqzZBU~K=MN2Ye$G9xg2CS0S6(H zpBqtqE-L2(_(b<51}4%ch3tk_7lHTDQw0*VZ-3)&e))~33-SXT$q{_?ec>%@7F*4Px~{tfBYKJ zGvLP$xo>-Tbh*~QkdFbV9R$*UWFkBVf(-rtw2MUaAHVs~edRJuX#JGK(LHHQ_Vtq# z9e=iFJCvX6jp_@QuOEFbZ4fnN&##;S%t(((23wRT$XTF(4v|Mcah81I`~uVk(QAnA zh5RxFa7TXojfds${_zGJTL1r?&-iP9)9t|i?r-|1AGEptvkr~4hWc^-GvpxEg6flM z>=T+MN=bM@1FH4eXSe_tAlDE}Tr_q+mgge*XJ zy;6`x=PE1?avA3UbkB#$`M^q`0GJJ^8kjPMM4icGG7Qip&@WsFtPTnb+NX^}G2@<} z9iApv09%7H;A&t8P+qtW*cDVVZht_Xk7{x=urE+U`v4pXY72)0$Ax;vk*E_eOeO$M z2aSchpnL+ijC}*=0?`IL;7ZV3cn&Bov`oeZZh-C#lYm=6JL6TfX{bf}iChXg3vU7? 
zfv(Bhz|%kt7!PV1Q`S!EEYin0S?PdxPcIesZYN&7D5fA$GE8$t<*?JdUND}&D^UWyv&;B z55CeF=%#t>AM+o&-Ed6a6MynL6*$2W_$g?t9Mt&>)bIs8{S^|&93H(TA?h(EDApD7 z7dXN_`ok0F=r1p6(OBtQX9*T<`QSH=1^8ch_ZP2czxcn(bDR+S^_@cgFZiocYoM1KEEZ+m5c?i zNkN(?t^7_&Z(*A6_eoQ?k!Z@nUEX3ctzt5r|C9-uD;|ORw@fHde`KP6+u>9{x|deN z$$s&M`Ga>k;t$?*er!|n(EP>A>@U7D|KQml+isext-p9&)BeaAH0v1pA^xWwp&*ax z9q50Y6OVV7In3VCzzs`Zb@PSXavGB*Iq<`%%{?d_Y)^|wMW#F%Q z+?aAKT-(Uk^H{iUtWeLXfZFDjsZhV;%sa*n`k(HRe|*IPzx|OxXTcx-xVF?~Oweu8 zdZ>-R-&=odRJsd8cx&p{MBuM=-01Z$pNjiKPndsvrcB{49+$L4l-~)Gi}VVlTegK; zOu{;8^<(mt7k_6(xelAPeu(-pX!SY;^cMQz7~bfpG9 zcXZTd>Ic7T?4Ov^ zmfx7$A@hqb|MXqjuOI2I^hZBOv;0%uWB%A%ceP)<J{KZ#4wy3z> z`YCU^ynpb;%jjmq58iUXru}RJME5ge3%LE4|LxfgO*H-I|1=HrW7=Mi|MXw}@Bj1J zwbTFcjFQKH`*~`Ldiyseavt~pON)y3zka?Rr>%bcTQNm-k$k^d9z~W<|8~al|2huI zzs*DapVMfH{tY3qrF%Y!{~AZ*<3#=MDSj2SKY#ljIYIl$_XINZMu6k?pZ$gZ;RiMV zWaT3HS(C7klbB9apOWy9Gmb!K#Bva*Sr!2FPP#A`F=&*Y-O)-RUOIL`w*DiT_AEIdRnU|Ta?8Vn?yEc-1Bo8!PhyE^TQcr_H(bC*OQ{e z4u2EV0T;dO7-DddbF?T9W1Qdb6xfJzfWl zA5P($9wjI~*5?B(=RL2R++`|RJSr#Nrhf&#v32Hx-ODran6a^O1NNZMgq+8Lkx8bI zN;DKw$vZSIcEec=U?m&3^a7N9dFn4s?R$=YmI_PGIB>ANj{t8A$G4_8mWwZuxnf((tmfe zU%^~a^;%+&?4Eb8I zVqT#~Y+Id9L-;)GM7_tDL081s- z{@iIEl+JKzPp|s0UnSYw3bmR@dVhtCh>v7zPo&y$7?g?-#7>pz3lBWu3~x`OiU7f> zTjsHBSkf|EG`=R4sghJho*qT<*wax;`R7BuxuyH`-D(~)w2WD|EszRMz6an5Jb_3C z!i#1pKUu9B*Y(nO)3Y7o_nj~*qdU%Hp!&sq)Sid)`-nW!r{{Apx3wUZf`4mjV|zH! zrRu8pIB8>Jw5Adq0;xi;-qqj@3=g{nqlk;fi8lf9Z1+Xq1M@2CH=xq#^}t{^{Jhr5 z;xE$iabio{QMUg1PC`g5G`g(>>wA=@gz)LMrEXFe#g60zdh0w7ZC)jVO$&`b9P4#H z9&w&;-;XUK_VX^BIoYd}tAE>IE_Rrj9gFAZkUdXl@@g5_+oZdxT(_z2OC(2M+g(#-y<^6cm`+hu4E zP-5sE;>P5651`+s;*xCknKts5VX7(?Ui&z38vb(6nsGUtYj`j4Q``#nO}jT$Pmbr? zR@14cR&CUscg2p}qkoe?hY0dSo|a-W7n6o1Y$R1OW*tfrxoVU!(BU;P)wMP9xx5B? zHI-NW37nVyIJwE)A=fbS1AU6x_N2L9a?x2I_ewP&R4IZNQGmPRU5BC#ZoJ2lj5V6{ z%xyNy6XFQYYLR522GaSNkU3FKDUF|r>cFI$mKZPYb$=hAOMe)y%+?@ro)C(jMLif6 z++VZBX-=jH4|m_!skf|F>=HPF%CbtT_x-~>N9XC*0_HTA`4AuJLxPVXcH8bsqCS+J z9dpOs9&689RwL4S2bMRVr*l}c0ETDLDUE^W@+;#7LEJIEXEBFjRSvo?Uk|5!I*!JAbi_F2aFnSc zphsC+SRvPyk?l%PVZmium?|}at%_zdJ&BQ4p?|?j-_(mM`@HEC%#-&iE|$65UA(XA z^NzFCjej6Iuisqa_y}e_N;$LJZf^2OC5g#jY`#UAOW(|w-FC;)!6OjkQ4!-&8b-op zz%0?CvEA=HojtF{9gn*4gM+WkG)&qU{9!E)qjxm9oTold1!(378}WgIQ3{t`~@YR-Xz&wRRs zlbA2cbbh#H^1cTi)<2rHsHWuHoiS@_j*5Q{q*S&Rhd#Zfi5xH4p?=zy(`{uFw)ZNh zr{o3SsVPsr*YM9u^j)1=)Ht9XzDnydaeu)#*Y2pPGZm|cmwXXADwasl>T^)~W$cK@ zu&?^^qB)$& z;dG)EjnegM8Us^5Jh@sbX068an*na;SW75hinoGu%TTztf*m$OcXLO3V5=EFHh&Sd zv~(|Hr70Hxc5#8jj-0{vx46r#Nhj$ zUkvHOKCsM}$^K0R>Uy^C7ndsTg!@#~n9!d7-04QCfAI6hwXbMy>!c{Sr z6}RB%JLWMv1q{K@=tf3;w*#RC*ef!!+sJL)DhL6}`=BNpm2_+vyaz1Epe!4k2#W0 zjn z59pfJF61llxu|zFtM`K|^D-)u;coVH<4-C6+D@}~X_kxg5+0Rjg!kzhcoshMv#xAg zZ@E-w$aqz&%u79=!eCCevU3E!sQ5K>a(Li<$<2v43<#r7D&rWboU2Z5-wU|k9fcL@ z>xP?2y+{7~)Hs|sGJjFl8J8^JYqoL@*vWh6cdGdf|J;&=IotXD0aJdl7!tudAbIYzy942!G{n@y2%3YdIgzFccJ!25)9CWk<|(Fo zp4G^TrDt1G+#$pI>kA54i%Ec%cyJjH);n(}qF&oxUuoLg*}!Gf@I`y)DaM<~dfYnK z0#lZrcYiPBmHiT4GmtmoVQoO}2d^uHTc+akYcF?tckqr&AOyg(P#= zkFnci(0W+&Y9=iyyw2pw8#!xl6|>N9+I6i0b~cr*>Gek*&lOkgywc4ouB*WwPUjNe z4{RJ_icIzD>C~^rbIyjIUN3K&3@e;j5ugYXZS;x{5hjEwd z=zsd=j+1nUixNLXaCjNz*6c@Hp0+@p>K7M2?xGjtan#gTGj}|vMZ3M9yGyT;jmf#LtD=m_8r{{Raj30(!}?QuYW`EcGgKSUve=$aaUU@Iw7j*lBgI-@8#yTMo#aGF`DOkPTsIF(!mlUT} zX+DI@k~~vwcLZ8Ho4}aQ*GJT;Pg8IQeR|)|@yR2DGV5jOvR7I^q?1t-V|J>wZA~24 zaAop(ZEJF76^2wr!Xu2{;VT04?te-cD2)ropqnIt@`t9sNWbI#;Z9$yrE@@?1I ztSe^PEqrfjHa%ZV-L1`PKvcrFi*9 zUC$-1-YFZraogIBVgrMxc;`!ekRwrd<%M{d;t~yUJ$8$BGcOMnf;b}7kUW2Gvwau> z$c=oZBojN^m7oUY_EfD?m@UkdYRoQgCrmQZL*hC3^C8Q{*hI(7IZp4p;fm#kt+e^7 zUfU~D&$YC3Be|(BJnW>(0fa@K76=YI!~)lQArC 
z!4JFgvg+gQCYCGHwfe$fY>FG&TyREo_arzi`{@{hVGz*OM%h@(!!aLU7TlES&q` zq%_#RIj@~<*f`?3?8*9_dpSPFTIu11iPkFanqw|$U=wM>S24Rc(P_`e%X6OIsZm+6^Bj1A z+UFC|EX##^7qYb{G{HgJ+LQ9t5!=S6cZa{3&w+6US>L;gUHE^VygJ$Swhdv%9lFtW zwmmtPT1S6Uw@i_I#9j+a>(LRxf^k+-401Q>Rt<0LYWYi;t#>YiIWc!VRS- z#bcSwlVoe~+z7lw*@V*76q}nIx39M#P#~d<)T6|>)w5f zYY)?luTdwHn8Q<~B#(k*b3!?mZRkVZ-yLNQj%IS-kAn6B-mSh0j(IBsDQz;XCYnwF za)Y1k)Cqqs%ORQSTX8ck5wDQ1CNupr?Z3%ky53n?zl+N}*S+_^-p3-b!W(tLPWh4G zMR7l!;``h14`$p+dR*U`xISDB5U~*xYv9rs^YLAJgms%vR6;V&7N0EVWKO9>7G29F z>nmT_-n*So7=HG_VaUn2?Ngc2LbH;2$PI!#g!g}=jjQ?gp04>|>sdYB?j;=3x|HX8 zTJalpWuMz{X>m0j+R^FqFfJVJYTun*V)1on4O$Nfs#YCrYj~X@f>LfJJ4*RX9pB4q z7>Q;%w$<|n8CYA|jG`pv(J zxj)gSDO2PYg)?zj`kf?t;Q-A}C5E>=jVBg1VF+s@m#Nk{U8qMJWCihTAJRqcOZ9=D z@#Pf_7H_Kk@ud_lHOz%}dY`Og_@Wo`IZf3fgiQKcD-*v%*pDhgaCOa>2bRZVUHz ztoO8h)30m1N4X?!D zmL{j_<~VorNxedKa-LdfZ^vVQKWBfNa&-58ar*5%oI3v?ocZ*bKD-69^JKSDzB<6H z)9<0e)qZn#qLyyM9rq97-j6q9WSEP0@=omiL~(O+*s=7&ixCkS!Hcz(dZ;d@}Xwbv7WbMbAobYEcP=NLmiAdsXO1tP7J^ zuD)?|N*T9y^EE^)dH1>GRnGKS6%zUh6zqH0wuWGzC$c%#%f!7hK@&fcM6^ebJk>vTMmS3HZ!@~t}M?8%R9zPC-kzuK#3 zgS1}T7C0ku^yy2z1lvmNyLHb48fx|ae1jP)nw836OqyeISe*Omp<`)I&8;D9{#lvF zxCb3Ei1<5nB06dzVP*&V;2m|-6^kbe9|6ffA^%SIn-119HCgx+)JuPJRtsXCXh)!& zuHJgmrfPyb!Raja1NLe(5L3ofvGY?q7{~ZRhv)01JQ}RL?a5rm5r6KC*Atg0ms_FH zMAW5Et7JZ&$+)iubH6OpelQB&^z(}E&SrYR1nu68ffdr1{C#mx$EOInR~W(j(;fWa z8iq~(ic0a^obh~*)vbSFyaSmfW>cO0`57E1hF4PVaXVe_JH`N9Jz!3?^PG63Z|BQB zG0h6k?5C@8#54j@*gA-3Y-8ow#f>M9pK#`nOOmlzOkc?&MUwTZ~uvNOz zl&a6uaNC(e7j1{?(zuJKAex1NP3Y*7Rxm6yn_lq+79k(8W|$6He@c%& zh}?GM&C^QB^qPP7p^YiFMrhGE$fvhrJi>F*P9WfBU31CHa_9OM!w>Iz4NmD@iVsx@NMq%I&_2Z zSW}v&y~Q;iDmCR+zFy12)kxmQZ1YmNK4FDo9JZNYYlS$n=MIxrtGs3v{Q$={Nm@BD zB4Un&;b?XHiHyMG^u9ffQF!5l)J=!MyVuXwv()lsWcOP`-`JnMTBeR&_R*5@LEkaPM51q5x0v|9WjsQQ~q!U^rGM;Z#&p_a1Tr6MgiwF zz~2;i1F?=^~|=o{NxVMCja!CiPr+PS=P z6u|83!C_;u+w6;*r1PYf_nww#^6Kb46FkoAsSl-y9Qmd^&+E4_7MalVqfxO0hlb`OU7o_7S~Yr3DQ(ND~=Kc>lnqo33rwXObR$ z+3kPr_Rjo2gq^p}u0W7r-(|BEBjH^z0p`Pd4;kKj&(n|o8_cYjm4L+emT>X2u&DZ;Pairls@u-wPzeO8z2zvl|##(2FhYGoK*vv5uHJwDt zNtP)&i{xSKi~P6{RNV?matRBP9xtyR&d`7FJ8Z#HvaZ+$fccC_}~WEU(qhAzC9?uq{#LYM7ZH zjz0H_jV+d>R{$iS<_SWDNG26G@+*L4j(D;)>(+PQPKmp9sId0|jb|yR%PfB)DWi?# zH~!dkg6!C^9-_=qJrpzz?=!+3D^>1=p8?TXkItpW@c>T>7Xr6Mylo%VV_}UZCsymk ztGQ3`nbM053qHJPst?U{ZEmDK z19*s;^|;qw@;0e*>Z+PI>GJM9Qus}+B`6Vl zF18)qRAvY`wKx`uCN*$3l##`HipHuE`&sNDUCb9;Ehiz5W;;jSpY7>3i6(Ftg^{HW z7M3*?ynUd8Czq?Eh@=9G0)}fdVGiM}5lx}Ieqg46bGD_`kwky6ov^6xnFW%N;^sMha#S~CiFJWU6hxl*E&U|Va1_z`r zn1<=7lDG3HK9+xdec7N}i;-u^cp&}V0A^wBn5ZwVqvc?m^c!I=xO#4Gy!NEU9g3?X zNh?_y7Lmy60Sdm}mJgViJ>Tk6HhibfQ-cAxJ3o0nl-{&2ARBL*P!f4V)8A(sF@Rr_ zPm~MR_xN1^g)f*ZjgKWk4Jf*N!umwX8FY^Z5HqgIPvd{P<~}3W)lO}7|FmP8aep;_ zbK04mZ1K`4iiFl>;ZgLdYJsp73)OG)OXJK{#%sn9sGHX!hqVuTW#Q-y zsVfGRV^dH*Yz!3F8S+IQ-wZ$?T3k9VkMdA*q2qr6DOZbLRDM(4kGL+z`u>vu{D_cX zoEZvap)>KeNFm)n7bZjmbhZ;=U@nn|hDf0e-wbqM ze)CU1PoAam0!$MFzWAlJHESK6>;iq0v{Dw*6)RSNQ}%Rpnyb?5T+)ji=)gv$oOD6l zvTPl;-y zYCjxkIP3STPio?%c|#o1wptJ)W-#!CjoIpk;w>=|7LC1}!llR`@a$0O1P&4c;8aE2J39bf9O@Nyl?c1!K41RTzHqXH9g;(VjCW|rjYH*f-_pcPrgxs$CH2$up~?vBt^{*`Aq#0|Xemz8f4E8Rw; zy$;blDU zJ7OL%cA4!~aCV(s=K1$ELu2o)F@1kw;xzF$-hc`Ui?wXZt_d3W4h+ERU6y{6|vMztGA?G11 zM3S7>Fa;s7{#)pVHV^12;em`+UKeklmys(Kb_%JU4gy#0Ro^Sv?)d`1)LzDSiQQxT zid%nPtS$PS5rL-Z4zAUCXj+`AcY62UU#-lSJ5fb^HjI=7YVsMmjk^$jPxZjTB|Ft% zYk3GxeQtVQ<&iFGG(PcY7qWlT)b<9p6!s=cO?;?B4hccu1Z2~80wyB!R{#`O;Iuq- z2#5%D=RyigW-K7;o58v%e`?97!p)L~}DxGlyDj=-NpWh|Ix=Li+@r2Ut-S%7XC;OkvfCEj_CUGiqi zkd!8qp@&j4J5MYpHsZVeWc7qenm@(BBNDEH;g3-I&PvFe5h={0K4FrsK&DT2$C6Q@N=&8hzF)-V$l|t|irh0V^e6a%4Yf%>McE6gJ0f$roEZmH 
zh&P9)V@;Gj>*;@{LH@QZef}MA7jnm-tP>K#H9ZfQ?)3qND@$4=!94q9$_y9N_X7uF zV1;?@AsV@ujB(U{ijd4Mtmnqv7~U^I@s@9hM7wojmSMf$nW;xy;H|d?_PItt&7pAG zE&TG1X-|mT7AN#`>bq9Q003#ATc3m@&RH3Tn=c4ui8y~M8JKFEj}ULehIS|RtK1p} zFas{J%UEgg-kGkN8EK+{A4ZDUR*466$ES5oU`&Ulf|ht+;BW6e_-%1W+q zBKTvN0}Fremb2cU9hsZAzT8q;@1m*9LA-DHTi@FtqJ{0s`EBD{r%1cE9PUQcxFV1| zu(vSxK{&cEyXp<>i{y38KZ61&BR&)Gj-NX?yC351v|x`CI%>3zS#pGf7#!yUTZpz_ z1;jdtyM;gX)qf?Q4AznRPNe2y6oprLk$sdZhueRPuT~G%A1DMhZZq{Lm0rZggt7*L7NcIu#H^Ceu%G-ycN? zqOwNWiv27~hJ0ocQ8{|Td||38d?mw*Tst#B7!NgyU==2yM1A>pt{9|HPmEpKNCtnA zSqBw(ULs~z`L<)eF<-Tb`B|BWzC{)nyKO~nE^%mT2|FOKAYtto!L5yMnWGJl*Y=vX z0*d0gCG!*y!CngEwO;_F;=Z&#+$eAe@)+my^WwabL%E2rau@^h7Q2?Blo9@yY(FU$ zzubKx9n|EeOfRHFH(mgoeoYPH6>onDu1YSVl7kzf@diVID4l1Yr~FLFg`W~)9Vqcw zJEenIQvK?5SdtCZF{;pQ?b>prD#leG@P-vYf>wd7udtvWf_5iWK#oC*4rum}S#LjT zrD?qMCXC{>td2M7AIz>a6;ZBPr=0TjE0C}ay5|iG`~bK_1Fk#7zR2kLa?pRonpmo@ z#>{^8m0aTqcJy2HC|zZ2rxXHsc=x60R!d`$uubeN<_A90)bXg$+xM=iNxjkZ@(gty zI6?Vlp7-H0w;^&60A-fAcQF;lLZ3CqDQO1$Sk8qJ|CVi>A^)VZ%tr;BSuprUC3i8=`UZCL~O-ONrN$ z=9a|_1NG(D#F*Eg<$5((O^}9d_d;#IY;BlIv$1T6)CYA+tp>wfXK;W1rWcdL3@y@) zD-76zNnegy!KVLkM#LXY>GMf`G6J3}(Fu-`&E1~(xwOz`XA}5uCjg5?m6$x*f6mPw zjrmx|tf{%pkXNTqCtv&T@yZkN%4mx}GNS?~qB2nk;WHKDX=B@fT8H83;)!PAHYJ}d z2JbTYpq2b^C1oLEQpA6~X;OSWqHK6MxqU%|jBtlTTuFjXbp<^`>}+XD>^oP-F5qZ+ zfq+RD$l#nNR58O65i};!64cMCBQ!K}v4H(}6O{UVB<0-H@OT4TcT1Xp*0o&$m1+5lDZUf10Ab!NJ~ex1m9V zc4`CLU8+R~&1eq-ZK-J`oN+TiZWGqkSP}>5P(d|5VMDS-t8!T3S=O#v_ z$qo?(a0qh#zVFwFKmvwK$wTVgc2qL}a^CT}Ul6OAE%?ML39qFWk=j{3qinmB$Miwq z(NaTXNhsyx$Pa(82w>GTz~@Wb@a7|~VLfw%AM{MAj2q_W1v?b6{XP(#+zt$>tX`a4 zUL%msv_7-q(d9aq#5lmF##M8cvDP}=c|1v%J8mtQDbgLrq0qW&6w^h??D_FAekBIer3z4{*x&(sm6wtTH$C_w0+{M1sY9H3K-!XgSB# zuR`W9vRnM~8Gt)*Z;EqZBnfXW3B8x_V2vDq=9~!(^8PY~$dKque!U*io7PlS4lU^L zgn=QJ!<&EQ_t7LXH>z3RoiX@BU@h;3O+ZK3#qEk#kKh3TlGkvBigMzxk^cOVnBB+Tsp;A$%x}?2mG_k!hZpmK)oTUYbe(DN&Y(`ZbjAwA2Do+O z7u&+oG3`9yl3;;Fhn5$ra@E#XnMQo~PKVgv#2|m`f~*m&=E_7EYm2Z6E6|7GMMC6u z(Owb2>qWiPMeH|R2H1TaA!;sFlz*Z<395I4k>VkNx%+}AkeoG#jdwcQw^|ki6x?2>2^05L z(HVaW--OM@&$4`An+(u2lxG985Yl)l{DNheh>Bv#^j^0@$x3qFDwf$LbXmw`@8TLa z(&4uV@thnJR?52{JKTA;__4%unl1M(r<&YQm_sVi%$Xt^>(5MbHTF=7H69x@6t-G3 zeKn6{R6&Qwzg;24)|-KDTF63dfikV!!@GY^FjuE6X*cbgO+xH%h~x<}#ZxDhOw>Y^ zw42d|X>Q~sjUkn#&bjWUh54y%?DrVLJEFL(2E<#0^Vh^(#)DBoxetK&uw&BAz&D~Y zN9xJaoh$HeJefR1l|5ZBjnSE7Ao}CuK^aJ1c*TR`rtH{vRFR8747!I&GhzqKJVt+? 
zshI9P(w!-5NFj}S=7n+6WLywmt=}tXGfKqrwm=TG_)?w{LA0#G2aSe@l1N=IUO_GV zYUGBy$02TQcSz|sWh?vmqlX8;e>Vhds8w^sZ>5;XPps{^JN?~Hm!QR#HtB9T1-uoR zJ>b@%#liby1@L0?hSfRw0GPq}t4n_q(ICSMuh*e)HwcKy+;)=Z*TTy;_L@h%juC=Jh_e-{5MiWSnjEjJ%zZpAjf_rC0sZSjNj2oHG%0vz^v zxPWWg2(uVkUiQMC%TE<(-ShGkWE%#NRNH>#3kbr-n7Y0g;u}g=0}QWC4d{Q81fdXM z^&R93s)8c7;dsfZs5tsLo48Eqr8%6701f#QDUzKe)C9NjvSRnucPHD?69o(G<<;=U6b zl`T~?>qm#L3iZ22ol}MkorHg0%MB4SkBzrNuEILCDBmFIe2e`0p0?u&7{ZCtsI4F1 zh>8GlFK6gdv(9o>h|D;=5d~CYGE(x|E^bpkNL=*V?Qj-9qOG_YA%&!c)=y7hTGyd7 zA?@oJ7tbGZ+3~J422f3ZNCVS}Uw6IIw^8SUJ<%691|2GtgaG=x1O9)m?6Hxszfu-^ zjY3}Fc!o`WLWV$Yvps@!Gy6~(HOHwRD)Z`1MMkdcglQfe%#zpLt1k3gFHoi`f!UX% z8RMce$KPvJY@Rs!1&an{zO!mOZ3BJnG79rruc1I3nHK?y%(rV`3G;vXje6y^kVQCjtZ+6|z}IT>cl6pxjAqG=ZJ%p5SI!&{8!ry3lDskLfqkJ5qg&DGtAW?R;zOFKj??-NiVbAEs2S6=EJDI1Ck&0HP{zqjSZSi29s z4NX`30aa*1;(wbETgo?(iD_RHi*!+9n|H%(#=hY@f%8cF1>>(tj}P&x?yQ_&-e*H4 z+`-b24vp#QUgs~1L^J8iN*zxTFiQOA8+DIO&6M~Pk}_En$H2aon-K-o4xv)@o&l$^ zB`JT1bYSV2AbPynpi@zKb}R1No-Z*45A#Kp$4gD#S)UvLz?59-Dc;T81)V5y7j*#7 ze8|Ekx+#;C?*>V5$ng>wD~eFYm*F-@==zvHHc3pvC2LA#e@$6RE^U;LW%|(l8|1X( z!@;BOM459Ruu&XOKHqLdCW$;r8h$d&A|HR@h6`Nuf8a2r;DU>U{nJ_q4<1d>_P@z_ zL{e9ii3sCU2}8hYTOo<7n)8a8uq-=^P(RCg{|V+q93Fk{L9}XDDH~)~N(Att?j)Tw zEZHh;=`0PwjrZg+v?BLOn4;;Lm#j8-$N=xOgOCf54s&?uY+RGm2_R11JvI>0d0u}` zu!Vl3oafZFkCqIboiiN}h<}R_I(@WYfGLxfz^XA&^G6-G;iLNRNTevv2=JyV87Ep1IV;&D7+0F@Y|k6@$O zZD7EokEws*nYBGV*MwsYl;z8;sAzx32%n&y>2>1X?q$J5Ub~h+kI(F85>h12{hOj2 z_t1`Au2r9eRVdE~eG|s~tV`+vJWaC`m*o}$gY$QQw^=eNd4qCuR@SK|S^e?=tZzPX zmTKCy2&Jmvv6J6(BgMJ%=E)cD{1wJxRdFZ1JsrZzj5xQ+F4Bdf)!Cun`^kUjaV^Pa zJ-b%fzVMVog0$feCU$VHKpjoUr>gP-QOh+!nYV8ZAbmHUzfMoQ1a^>mK_0vQwvo zEmCRR{;TAF<=qGN%xC+_L}Y(KpF?gg53dJ{SuF&lsoWqLgs5;XTCoR2AIx|;xkUu1eoa>ts zy|1#S29tTFcoZtY(Vczcyt`|kgA9Pdi}gJkv!n0!TkA^|q04t*^QRPBjM6aW<0=QVu8r>Rut#ZeRh1I$B&796mFKpMO zS9!>bgP%El=bi`9jtV0WS2bk4LGO$4Ug~KjkfI6sl&!`(__}J5YO)(82}50}ls16k z)9c21w9L|W9NpS|;>aq=!?28}4d{}gI1|#1uuUM`TqCo%WITU3Y=9>H<{P87Z^z9n zPcKw}TAQ^gJ|vyjZUi~6I=>9LsSnMp5A2zhrgj*uGNO{7_M?BTufTW`$`-GukBM&A zk2Ottmux}30+N$t!~Au1pJUDj*QUN}iBn6zR89?Er0pzYc{TKYpIMg^#9l5n8e9&2 z-aHq4t&18-t`UDz@W2nKKqHtH`Y;qg&&ISv!ru8%G1eK{7SzVy6$WC#lU1DP+Q4f* z1>0s)y!;`MRi2U0J*rt0So~Z0cf#ykse;K_@IXD}do{iCi@MNJV5g0&_5-BfA&}NL z@7OaMJw3BOTL}eFhF-b+XiDmt{47lHF4cNh7WUIv0bGAvz#_qu*`+}9_*}LftuF6cR-@^$;MxPcQJWJb({iA&MA>avQxHF$p|QRdnZcE zHi)fvx&xnaSqPS>S^{FP{Nr6ClS?yzCxH1QRo8q%w{#EET3aUt02xVpYv;}P^98SH zpd+m7VEuVut$V%(wC<7T{AG%8nBISoL6#6i_f3Cvm`W_!>@eDzwHt8T zYe0(Bg#+3?B0AX+O9GTOv466*1@shX3D>7 z-K6h+^%@u;XB>(9HquvHlV}>Sr%Y=IoeCJ}dwxS;geTuJ9e#&5n*=A%GKsT{(s_R` zcIQlFC$pZ{{t82y%XM~soq*U7cN)b77k_F_tk`7v{xX05kz-COa@~@^rIc!(?iAWS zq8wgqHk=v$jDg(IKuSlEiifNBG5BjUfQXYWLpqN{5MMpetiG8h8iisBHTGb@NFgS1*7t3&_6r~ODz3tT8JW3!e-$?@uerO(%)wJ1t^8@vPSzuJ; zQ=8srKiy5(G0#@-mrJnuQ>HwDxJX5?@3!W}O%v^cVc+Gu1ny5K<>QqV*-(Fxn?rLJ z!#1Z6@CY5gk$~lc8IQ2mUcO83U0H(07q`|~XGAB~!s$0Os5BtZh~@WQBhub zrFaIJMvP`BFXI{Zo__@^*zmqo!P>6qc;yM|1dff^^c5q7|84KidolSxtGnI1S4kEse zEO{WEA#NU;6W|i%(xj(SOTA*X={Cr;KW9)vy7QBCH8>fZeE2rc%$p?+(RS>J_=EASoGlE6? 
zKJ7)o?#c-e`EHHkrx*i?Ctp)iBCC^;a3;)Dx-b06J$T-(8C-u$^xCw9T5-Spy>$+# zUVJnZ_;|Xo^q?5?i|oV0bPlLZlT(^(&uV?54xb1&LOakIt0!6w!YG$0E?7f7#$VvT zyFferQXdH^X82L8jJ}W>;}}hfP-dmg?S2>&lQ&P|)V@r?J{)rpS1CE6{f4QIS-QV?kwZm-hIG^S3NBC(qUWsO?^V&e6KSGNL zRiIM#?ZR$7WbPapt~u>pBjj{6^y{~|jwrb!ai4RZ%ol&FK}csP)f#Bv2q%lbVahGU z66$7zsZuY1?v8KDX*P6bwyXD4;Vj*yHL@)|acT(Sg{{(#yh-U6TL{cO zAVf|(ufcy05$)io9U3+CyEo+0K-G=@b8yy?-FEg38#*KpN^h$BY0CR&a+dv66Lxtp zb`UuDNz5Zt9$dxl!+7!2LdDCd!;^edWh5;XxKJ|a^`@XIiS(d!aJQrgShLQ2>c`bCE zg|SFr7Vq#OVUKcBTx68mN?+#pwNDHS6T$7SJ#CTZ{AH&G1)K1!8wV@!Gw}dEpl~8@c)^9XBw5){O%Q z19mih_Ecjm45}*BR8`Pt$z4?y6a%VQTWMk%=pXfRK z*QfPGU@aI{bu7FtC-xeyohW{5gRW<8BdS)S+H$~$vM}p@p6E%K!xO&<8vT0JTVvD* z`CfUz>!*e|07#?&smY|ReLa6Ptge9XbvzLIN*i(nM%-o)drKtJj~vK6w$XSozf@b! zgYcr1 zOu~txB?+6mUOlQ*7vE7I*e~~)VyLhm|}+f)uMkPq$UaM7S)WCYCI zd@IvJEsQeT7_fB5Dsh!kvFW8gUx^9ux_%qJWjvLOJeK7A8y_i5*>kctdI<`SZSkf; z&GCrO{zFCsE$TMQKM*^}I#sk)&nwbpbU3bTjmv1zFQ{JQ){O)Dj(x<+fq?+o+PMEngkk5thnRU2SV(?LQh%+Pn6sbAS$`U zB4z53-MA%_hIVzDo1(n7&%@zJm&O4;8!t(u3ot=)eb$68Cx2j?!F2e0PE zL|>RZ#M-RL`IsyE8y84}V&8;O<*)9!--b!6=@b1Pw8&f8Xh^TwcdZ_MGz$_X9(_Ch zblL9*)lhTV;^wS@Hs*V0Y!^~IT+VVke>YHXnMoGkSH}}mj>ZQiKS&I%vyFqp>wwe^ zNv*8ng4Q&WM%sT1b1M7mz2bKba7}sewJigWI(~(qdI18b&5$p} zt3}C?9cN=Csu8=ONy(n!FidANc=whMELD_(A@h{#*XUd8pg&M$3iOBT@)NWdmLQ5f zBp<~$S1S7zK&KX&0KufBzh9J>*3Z*d7kbw{@Y?7Tv1osWe1A;ctPj*Lv@ISeKSck% zKfO1q?CucgN@1=&e#)x$kl$ZJxO(vWW-Tk62d=o%`0P(Vgaj;Lg< z3mBGR!Z_XKVsC0fM|^t4c`?b8xL>aCVF1dzO2 z+=SZlbxi!Mp;=+ZSrC8;Xu_H0V z%$o&^JP-E^zuzcA{4%-nOq0RlH7{-xP~CslAr8Q8veEmEwG)kB7Z?`+YZF4+tK|;K zyynINWNq22m#MM^YFwE!kb5nfZOQ6$LSEtB2uPOSn(LG8l@5?rV$cX#MeERT3m~T@ zkdw@7=!CId*wEv;owv({gy3Eh!K>E)BsD79{>@+lpZc?^|o=LBX*5Wasp zih_$l>u%)}*EBd*DE{1)cu!9NKS@U{4{Og)TrSrVN`J~J*y~Y>33g6@a-TAQO4cy5 z&>BfP8SrT|0RcQQ)La#yJ1Mf&U-Da~ePDVH;+XCdXQ9U#-LdJ1rMsy;!OJeR-i&cj z;i`4aml>5QsltJ|1HxqyG?T+JLnWN0n@I=4CAJ^Pnc=;o$Yba5=-g4|V zZg=O?Ss1YGhYwVXqUZsvr>6DeLr!kW>x$KL76J8VtIdMfAl4D z3Pub>A<5fdUkssOd}v}>6a*SV5!WZK0&mOe*Rq3LyW6ks$$B{T00j_(1CGU>*63Gg zW>kuNRVx($weu)^Cx&5%JZj|!beIArQT-yE8y+;>rmQ*{2bI2d3$U%DNQ9F?RG%Qv8#SP>y!zD-<2Z<}vh)l7dF9ybxe3vUzP zCm7mieN+{Rf}$hh*=RoxwTbDOx347A-tb#s3-S%`PVy3z(2kugZ^e;OD-k-NSJnob z_4^9C``TSy$0QaH4jL%gdz>t9##)GS8eO9EH)YdT&*PKFL5mf`A(@jOn)9Hulu8s6 zV6~nR7UH+>J<<$|6k5aDJF+7iQ+1 zeroPuyR2^M>8NesS$+-JJ@ByO zDkf_@4i2J_7m%4f1`uD!Mctc8^z9Id!6Il2y`R;F4CkS4{>D}|84CNgyU%1`FIRe1 z-*5ecA5_8=F6w`F-#bDDABkW-$7Dn#pd9|OI$n8@RX!PKcX8r0}tpFD5l-NrR3U|UjmT(V2hok zEXK@BDO|DNKB)ZJFFdIDQb7Ws#L})9Z}SN@^N%7oeUpFbdww$KqV+g zF?L9mF9e4QmgSQaMqmzzNeAAc^Sqsc45D7Hnf zpHwfrhJ_1|{NV^ppFa1VB`w~EPb2R6RpT(3@4&;bLuWFvoT5@a_%DYJdzn9P0J0Z| z+@58d?@q_W$c@)lPT_c?BNZ6{9t zJ~{?MXWax24#EAao?2*4OSNay0e&g`E?f?I;Itpxfcz-wnf1DmR3jxsl3?m};aq!C zxNYW7c`Fz4l6@hbJ?5oN#6KB>T0NedLIBvUQ*RayDWGuG%YSRZ?hF5%c|VV6XXdQ{ zNkF#0%^#w9*yZTS)sMhS@kw14UEWV~q77AF>FAbx_AhV!YD%_HKPI(TjXd-L@`RC2 zQPR1bG^yyQd#SZdTvbQegLjx`tYRMtx5ECrw))j7>sW z{sR~JcQ|sy+QF30yOquD^%EnlGpkP0w*=U=G=i}z>qEQc7_`Z$RG>h0+zLv(DjN1S zKD{FN6tz1$P2T#32qhHkrQ#mXG~p9tB*|wg%AS+oz-;7!cx}E^a(4C+outJu_&>43 zf?@LhEKJyLeC~hkTu;i`omCuQsEyqPNRCLF&8cQ*55&yLfWDRp;Y!QxP(!8Xmd`rrkE` zWn?-4HvXT9=^ixp^du-DgN&iXF-a0Mra)27v!1l(xF~;BTx@GetMTz}A#t?1*r{lM z9ow;mk!^$4Oiz=hpSzhjr1N20v#j#ioh-4OA~{f0^nhMAGr?ry{s~YLwAT7oBA18T zp?#%~!+g)sCD4hqU_CZ2^ZjMYybSqSibKpaa~cIGj5SYRbs^Z!TTTalOeNet2LjA( zux`%Pl&F6k?Bq$EU)d$)>O`cK|p&JxAv3Mh-QB^l^ z5(AKPDUy-%dk#&QoUu8H>YF^dg^J-DrS|?Kxc6tqp0}2{M#djfjiEp5m5BmZ^82>8 zh`yJ)z%-eaq+vQF>$XJG{P9C5nyN)E`64f>PC$PyL>?~^tl~=l440=`9$sjg2)kBc z;|rdK;Qj@GU7Nyiedq?@?XRze?r!yr=_!OF1Kpu<$D?AA9#R)>N&PIjrDa5-u@W4j 
z(3#iWW?ILcA}n1YP-1^C2Fz*?|DO)mVrO<2UYXE{yK^9CDEV9PK{7Z|+SkWnsJaNb zu`hpblUF#A8Je~HpVEU0K}3#K>eJIh%ky#-Atz(X-hsm0Os%n8ykvqd{H*we)osH( zPW`Q4sOSvK=LI^P2oNz6a)%;Ebl%-nmlwRg-13*Vhv%ph;*!28z7|l==cLd3bAUpX zF3C;W0J+OhTfr6p1w^Y!rvLHLE?&+@xrl#z zQo#5NnzOEAEkqshD&T2j{y(OX^O14r5Db zT2XFOa?MMSN_23-403cw>(YUj>fcKzU&3>KVgnoXn2{VVDX48U-)R~|=ICjJH{6%x}`*!`t(>McI>KEfyuVH=#h|waS6*T=oN>*O% zpuoJQq^H&#(4DG1Tj&RD9N>Y!yeKO}_|*zR`IF`C3~l|N!%t-xu{WRpp6`FHB2fgs zYkVN~X;9V@T3#%?RK($UCba|F)EnV@4%W%_95zsza01?aT$9%{xSF$GCG1O$&Ko#n zYxU;=yzde_{D=-%qrI*3;6XHL+Ui!w z8xpRb&z}=JPSC*K0T8~|W4?dWuo5ers-hAk|5`Y)1EMO?=YQ|8VX*qTG2pCen463d z@tUu2!3S%GqN(a?SnUxh(rtn~QA0{r_X^Aw?6eJgI%w_zke+_e@=rbNTRrS1FmZPI zklqGVA)kM5=>^L3aHv^Q-|uoLmG4*!2Jdyaqv?0H67|F&$Pfb&Fa3Wz@U1I(3Lh*T z5`P<0;geW?d7A_lF9G#Zrz&EWTk5@*F#kRsMET-?$a!+g_mh{$w@$%SWIo5Gj+dvT zEGEhLNlXI(<7jswG*YzbA1UpadFpqHVPnYzx$UbOPfLxQCnu)!;TIZRvsWpd2XqP$ z)Bvj+HZM3!gtXZkcyoVQa|_bxL~YPnbUWJW% zid&1i^~N}7B88|&bC(i5?(IBN3-;{HIf@at&UehOqR@H6>5}M^Q!dXOqCkN|7%iR(5zNQ(ty_O5PyA9t~UopFB4PjRt zFE;*H%BVx)Ca0nm5i+d0>6nK^CvDAP@ojP8(|V8p267K15CKze|K_GDWnB=o=3^9Y zYjbNxI3RzO3Rv8j#MYQZ@8lldC8F`I%*DQ{{M*UowrqbD0u8t@CYq|MN2`L%bJzr% z?Ku}m7zIyeqMVY)hgw+&FP{p(6vSjL?)yHR>uIAO96+5B%L@(3U5Kk|CMURqopaFn zxSpZ33sk^SLjV!ZkZvru9%Oj&dcCot$4hC22ZU;{scF6HIWfz&^4WeXj#<=k7Tp*} z;>U71=s$nh4{i7h?ALS`^0~SIVHn~rw+bMK*8Xh=cY71RT&8KlE~y=A$Cidfp@=D6 z1U-oK{g4W#Bn7ozQu4ti8A9yJW=sC~^40#HOx#>6M?Q z94u9VlXdQachd6)&O+D|0N~msR0`!&U__)hNi!On?a*)LDZT#CShTlwj1U>FNMdl^ zGunT;V!`Wg9mtNLAr0pD>|_M3LHwd^LZZUws$26@IW)iunWlVB$XjtxiZgASKQKt5@^vimzY{ zKFYzBn~?>MP*k!p11qI#De5_55phHx>D5%^2%$ppf#OYmV9gu9w2h%pw=YfiEgZE- zUMRRomQY9nKP0L2eha}8%>t6W=zX$o*lBopi`(*-)D1VHmgBas2VTUrKhNB@dlpvu z$1WkjBf~T{>X`EpSX>vQ!#G9Z8v#ag>n=;7v7{Lc{Z_>1otUvK&@c~B{!qjvb&%7PiI@6K2`PWk^O0o1ScX6F+g|$Q zGaZX+T#xq-9&eqnz-Fp=ez401tlLfKC>Wr|(wDPht5xN)q&&A0W%PZOBYq&}Z3z%N zJbRbmyCs*bZ)lM4#xe#%XB})ck37#)Eu-z!tGs?y4USC7?0b7WfjK096TI==GXQd} zlka0e5%}~l;bMQ+n^rqE(TAp!zsh!%mOu}9R>6MDNj(AbO=~Bcrw%<(OjZu%P)Uh9|j}I!YvJSUIM;=o^>|tv5 zr`KN!%h-&gg=`_MK50qY`Or>Zl3a}h*KXcK}`e|)>tjfUrCK+Ays^jnHI zLtIZjg$59v-QZ122b5$ogJCX2kCpbr=Uvr`>uzjm*NTxk(P!|UZg3-ioLdGK*c?-k z9v~N)=_mgyRN7?%Ey=l%K@vGlO0OM$r|uVy7(O2;17*ZoG|gaTeWv3N4m-3= zfpu6oFKOO#Z`Yv?hyOT#UkJTYA7usPS`WNj?{*Q5*C)_4uKA7iUf@Zpl|+2*USu9B z@qK>E;;<(D9^FD@-~9Z%(6!Mwbswp9uB^rue=<&%`gI0eWf`*FdyeOe8UL*0S8=Wb zG0Y=v(SDWVoPj7f1sqt?GWo-DL3|gQcqm(Yy|bn7RYwTB)|DoI9k73agCRQ5H==In z`&xT338AHDEu*Iv=govJDIY_aP78uY!O)FdQ2b6>8U%rtdUs(I^(G!B**RM+inA5{ zQ8J!7*45wZ!SrGTRCYkGItLFo6LX|5gr?P&(0aN}kU^0x ze5<$57Z-w;1;F7T47@!ZUB7+&hW6d=m(M-Jj<>9Dc2L@F`jE8!Q@Y8U3}b9=-Q=Lr zv1)JBx_-Z4aYPY^9nkk>dlf|9a7l&p%?j+&%3;aApJLy3ul@9vLyBdl>DjXU`Nd?d zBEt&5BKZ`5D?ZeLlMJhDYUnsK;>1qZRRyQ>7Q4uUG1bj>c#{C2h>uu$C`zRi#CB|? zV`dw3n;Zm+uZvW;_@N*#DmyijlTF)ApGb*6xDL6YLT?Lu$&)`+!%uX8mcE|xaJ%i1 zwDvaiF~?rZ?LwBOfP^ z;(-a7-XcYUR9@%$F(QoxPD;kGBCh9^z~d1upuxc6E;3#>Q|&d*b&XnihI@oP^et() z@5+jQWO^%4W>8*4u|+8`nmX<=;g*ka3dMhelak&FNayZzBdMy+3P|>%Cp$+tHLhqvzjh z%DG&*4VlU2vq1flV3Ov6+?km1GQz9sMz4i#m{h zm4PX{nW-4I$*T1dHNVTAuL&&+>LmZ8HeprGl5(2^e2&3Zy*iM(<;Qm3RZ7NTRncsMmt$vM-iv>hgd z?c0)U3O*YhI+xc}$g=8|6M>jt8=Fvnsm7sK|EklE31`hlcM>8GI8oZXn45=nEh1X) zhH)+yZ373A4H!{pe|FV1T%>yg8eX)y$->eTbXn%cfLm#>L!p5aVSoIbx^K;v-_Kf? 
z&YedcgghPGZI~&L`+}`EDSL%Meo~jf%_uHo%TV7Bk+o@pBvrngTeLGdJ%Iy%RyQuj zx_84>(Oy74q9ns&8-9+^$U^lJyqAQPMW(Bwt0hXhOpWjUHF4wKisowfC?2YmaXFoX zo28ZMibz34H-rg_5FNWIJv9)((5X?~xy_9BS;xR|_jpx$*veM*CDku6T=i_^iNaHr zBhhy7Dlx_XoKK3V#9;Ivo(jExJiik1o4v=8&80F}<~v~dCHBBbTpwAaR3JU{p0Bsf&QB99 zrCXbMGNQ&F5c$$Zh>Un;G5j(kKWhPYzz%Y@a^qJx`jrsp+(n3f1#)>jPgMq1!3u9f ze%-xea-U1IuzX!&HX*!!_6KyFxn9L*U*$UvMby|-%1Sz5uy4wWC`jjplUNy8BX~5G z45PF>UStVP((|qd`6x9mLzYq0mav9t3JsFFhAZ+rezFgxZ&MYT>q>(5G2Rx(gNw_2 z`5t%{wzG3kUM55Z0~QP%A7x5Wno1k4(a+p-(qdgNwiKF1*a8BD4NtqFvYs6sP0xrIZxY!n)e1!*0{I5>AF)9#(p3LQg z@7g8NVB!9Qy!VX5WA=~{a)JR$%28#JYc@lB1-z#b$Kv@|HbtB_x3zLt=Ed1QzVwNANeU?&+PXJ= zW}qB+2d3_SL_^`yYC-=_3-50q4UuIMPJh=|gzU24zBx2N6sERNc{C#}6^$egEiDfB z;b%9S@$zxgm4ONOe3K5zFO56-YCCS&j;2H)GB9zr11$-5VeBhA{|APctRoS5oWi_8 z>qkX7Y2DN*mS*MCp7H2n13BLh+a-SiEyXi4gIBYE?mg5c;Wcx{Kh>aB_{ql*+WozA zOIme7AhKqyM6x`Zr1y>P@HZL}G)-$wlH8M6df@#mR2DkqKmajLDmVLS>XkNIfR!Yw zWNWgafeM>iDNl*a)6x~E+Esp5xQyC4jK0I^O}K4_mSA781)22AqXQ;TqV@RJP1Cd; zjGeN7z1EZyqzLE!ypORzh<@(V<3)vi+pjdr&6{WvrL3CXUU$@7u_;-QvituXjrZ-e zdHX%^X{4q_Hs*^&;I3XGYOj5T0Os}j4xXS-9znZ|l^f6l96rah{G0@~LjYjX-flf< z5u!)qb1^I&%i@Q;_J0NES9jaaV*B4Nur39ELA_%r_lY_|5v*EmF}P-f)&e#8@tYth zJuYQWaJS7=;iX;TWnAUg%s?3q56nN8tJ}7JKzVZH#hu%q!c*4w3G9pO(9Ekt-Vw%9 zMdrl?adw1nLZnHYz#cU9Vsn;WD*AsJZG2EA#Z#yxjE^WpUwFc>KE@>Tv`C^ zgd-&1uX|EPe6BI>2KD&mhr*aqq)KI=sR^biX>_Fb)9qyH;$6I#eh?HY;!1k+5h-}o z5goKg=2rJy#$(_2zVoz!el2;JTgcvcQL^>xj%mTp_iew#7#Yg`i2TH;)7o5rZYF;U z=*h!hGCyt>T=bpEQrEVD%3uSCoXyu2q-=+!PsYuja4?=_xVnkX_nD)DXr$7W)&)ng zcZc@U26kl(HXRmztn)Ec(`(5GkwON7h?p?a+Z&d|2#tUPQU#d1T3Ky2p5dsT!Pc;I z^b~l`YjYJ5D_=t90mkHh>+1f0ZG0Qdl1W$&EMaN02s)%z6*Zs)tmHzm%uimZcNdME z5DVUmujH1df|svf)gRXcy+WY6+xqX$b@EW{3@8tQE;ht_7Z*euS}wWHIv7I?mBip5 zKsdyFsVqsI1JT@Au<8tV{~GnGO3kMH&@23^a`PcJbJ}^s&x>B*IHQSwx`KI-@Rn6+ zUn$zY@MTh$T+x@JrvrBF5sc*P)JZ?{EFZL&yD+?Z%{B1zDbNL8Y6JMB1VXx8x7=L8 z+NG^i5AmEV#@Bq0_6O9z-;Y*ZB{n{7B|k-VSizOq4)Jf$K!s1{?%q6jcsFHS1M^i8 zDvaPkS&%4F78=@|feImiok>}S)+%Kc5Fe|GC2fl{ri2G|08J#~Wk+-=Kb(#cPpc2F zq#wd9Q4d(USiRYkLf3pKJWubnq9+z_qK71CRVX?8dE!1IN_*I=E%nLHC5myqVu*A$ zWV}Uu3a&R!vUOJbGa(Q!fJgCkJhSqS?)WtSNcA8v!`mLj>o*L4@k*Uj0pdz?1gt*Hvu=0-ezw9Jbwyjx?x}fP#cX|%hapri5v-Nfw-IeM z%cmDAn0s=-8a#x5-3~z_F2Ls{0CHw#431YEArq&nrL}12!Ffy(#775`PF* zboA0HeO))@ipzch*B~)8T~jf%O?Rf40aVMBQp_wjWTX;!)?=g=8z$u|P4cT`lzzD4 zw7d+(&xn4*!*hm`Mt&>8W*sw0CnzJQs3a{=% zwt>8V6rOcWoeW)k?qUM`%oGN&?t;G=lR8v9`S{iPGv2;DQ;3DGgw_s2rqdf;@BWLQ zukf^eoaSE%b(CVV@LFf|qDu5oGX$OLoQTrrWTsqIi69a-vT3O4RmA#LT%@Lnn++>9 zZEl@UpT@kIcK=rLYhshMu^t+yLyn55NswB96{tmZEx~XXcwLhYX?Z`0Tpp8jjU@we z@za!3O(HILpSCndD=e^z%wq2|Sa?4o6nzb7xSyp8Yp#>gqq8MZm9(z-@N$KslIq*x zkh5r5>BT_aBvI8Q@Ulq)@sTOgE)>KclDb2-ram{f;A`L1Wd0IQe-L90WS?%<0xtP~ z&&y&8vObP52!9A9bp7(^P%!^CF)ZUY6Grf|_sMm`zM4!HBMB?`&}ugkSqn%Bf*fMC zcQQoj-x2Qlg5wg*C3t8nWvLgA)y~bW$!5KwLy1)tuZr~x+fC*5fxCdmh)A%+1#dj_ z;9=Ef$a$qrJ;MC>Eu-7>%=1xLrceccetG6&iK2mXc>qF>QpCrZudy@O>GZ@Tdguh7SHkg?(&*R4?C`SRM|b-zkD8e5IcfBV(Ag(>ShJ2uDgLO->w1ZqpAXvC zklfCbf!J^Gg^F48%w8sWi1LvBN_&K$2{0BB@Y@UOEIse6P4r%cY#W^w(4mT`_eXLz zk65~1u{0*}WE7qBh>kS>7JGbuzHklr20VbC%rK*2>P@%RHvvd#wzLkd&7kGs?b;JV z(pk!&PJ2?1gF9^_xD37q60O1e%a2an-gHay4ca}_q+Sk#1KH>^_+Dt(^j&XNdXsy^yU2pOo?p4md1Kkbj6|Ch#)_LX(0IY1p3W%ZmNl{ zPHuyFru7TAXupgtIL&9TfLXD(X(1JH&lJP2+sAb-xb>JfCktNv$BPa#(Cp;o4$Bgk&avyX7U@UI4!q1PXe{ zXdkGb-%%rL5ZUaHraZK%_TQPvzTgLay8LF&IGLF?!yu-&K;PY0$YAqFc-pEm!Vju_ zuT*+ze74CSs2*p5;;MW}`;lLZpCv3++_Mp?sXDQ|Gf65-uZ-~tM9X8+smA?-JkYE$ z$i>2nx8}~SGFKdbDh4$nX?lKoWNi+)bNb@LI6|)}ns#a9j;mL9BoXJpu+MkW)Nfx@ zLKK|0f76bV*aXD&0qPCTsk)`W0PeC|VQEBRu}3 zWo!^Wvy#Ik<1IH*@r>XH2$V*3QsCCviX2l$VVHln?=b3M{XN$k{(GOsYaLoA&d@OW 
z>ush{S@habLpXR{C48d-uRcY&<)s^fos=0zLf$^+7BBh{Raw7f`BPh^Ww6TYi>jC| zOTMZj2Ro5wqu+`C+>Nc$qgtu`>3Qbx`2mE5mvLnY7=L~Pn&>O~Qc`R*+rAh8fGi76 zlN1D3e5R1|wfZO7hZsUyW>qGbq*FGbO*lHMg@IXkC~%Gh_wpzkrF_~#(KL*ru$7H( zO&d$`TMQ|fa_Y#y2_v)P>lX3#l~3D5M;uuWqQ-`;&aIuZ-58I<8o5jxj3*+u_?o&S z3*Cnj5wo}oXga>q zLRFa;9MJN>Sgss{w?2as3xU|c8HB01>1dYFJp#kGV2FxpAY_{>CnFx*k!++$xyoIz z=T9VZf(&#w@bVKl)Z_UPn zPU(6njtOrUz#Wp)Cw8&DH+$YcEY#Wwk*0w@k&%(CZalw*a`Y+29Z`ap$Yu#6e=hbA zEJnEcH~5OpZ-sR-g!{_J3L(+*)-u}t)hUaEz5^wPA)3MZYQQUh6(|v`>5IC=mgHS% zyG@tUMZfE!g28D`!Q_4Gm?=l(q$c$nX|o8U+Hs316OjZ}<4>!m9ChJ3k+uC66}`=? zAaj9}{xwPGMz$e8R2d2T>`V(xe^>g70tlouio;bmp;dzt4nSF)NEARrGfw%7BpQ&m ziMs`x%RwWaNsHndr|%DmmPz_rRtk zikZL@0zi$0FOBhAZm{%zHzTQv6xGYUsUPG)uLf{4HjFYeg{G=s;_ZZCZxgvb6@f`& zhsKzM4XPMkvLDlK*x(ac-ISn8N`nFfUE|dBPrd;q=_5=5cJj|Jf7_IE4QC0OgF;ut zG+>C7iD(6^*)*y|#$bJKW^5il^kUT~Zczb1wRaRp)-&vh=z8NqwTwap z_ce!%+I?W;8exYQe-z|bkkRh_2unC?Sy&^zTGIrZe)PPSE(SkQVOjDw@BFk)r;DFX z+>VK*DUyY(v$RHF(i1A9u(E2Belt*X-x7r824qn5A!QOwL<23GPiO=`CqeAPt1+)* zy44Zka%Qy)r5!>wj*l>oojoZ5<9aZCmzJ_G?|y7!>8KEKcA`k)+e26uMtmUOXN3=t5AJ;O4duE3_ZpbG zwynmXVf(n{f8?V5d7805^a$yz%Lrmi8KyG(gV$?;ADy0Nqb4tX^FTR)?*11x)SyX6uD#r1e#O15?0G&31WNo%DCe+y+TrXs~S4(2}e@`|T*J=s_0qIftEuHp5H6Bd|QV<+zW1|RJH z-F!z0;A;>Je?Jzg)A{lfq+D=v!Y-TUT25pR^qs#OrqgRPf$~yOO=wyOOMUC#-K|DE z&x~XCe}Y~Su?X-Bu>~!tUJE*05Xlp}7#CH`yO72INF62)f0Pz~eK^i1P)F`xz(f=*L45Z^Ur zG8@4~&J@zpI%~+pBXt$GyI@f$PYL(|xS?kDHt{Hd_B^!3tp`kl|q7!9A@Kl7qTIQqOhl`TeDIvY# zfAQB(59O6($#mbof)3wI?Veyq<|iZS0>`~$T>~wOr>SU_IO3@{k4-juwP%`H zm-K5YGc1GLVi6j&8Qe~-pN`uP9YV$$97WNMp@oUWW-4HDTHvp^Ku#mnoT+{HoK`gl zF2b2ldY}%!I&UB?zVB@xwhvaY7Aeonf5ZYiJp)jTW?u~d7Me%6oHM)5_Ktv1f|0?A zz9_Rw>AYbFd99@MP%bl_3sp$3G2qmiXy&-pVmO(~cx>WXQuiSckhogB!LqOrn~xyEY0f3AL8 znnDPolhw^=p#1WvDLJ2Y8I~Rdb0c`d-^;`pp-5DYh@?p%s)%R0JUEoR7 zgEvTo#w$Py#bD_aBGTz76wM9@O^-;EkhOP(BO4dj^i_2_jUT_Ss)~@5snrMn zrpJl{zi95f21MY9+A{bpof_=iYu|dqQCQ}O&)<&0(c%9lzUlDj z3K-NnaJZo02TW!158oWX^51!@8L|8AeGU%4WO05{Cp;l+51ga=)erly@6uwe&O7?L zV!^DoJXV#iqkobZ-inMNe^UB$0hb?rUVg-ne>RH$7uShSsH1QrWXL~aYAqDH8-M}cre zb`VB{iIMr#N=f-wyKHH(QJGS))ufYJ6h`^ayxrxa}zK1`M7owu#WhDd%7&N>o-YIH8 zR8-WUpZ-}vyJzN{vwqM2V%BuGh*+tYy?0f>xLZ4C+3TdYe~}=tOV+ECXTdOk=B|4? 
zpH6-MyqnI-85E03rh{{5bMQL%uGzN-S{&I%Xg#8Xc%(y`J)<&x1eVUN$3CK+=-FR& zwQWVM&5|1jIqt1*V>+2H4kOdqBJ;VoX3temo2`t@Sv-fky%=@J_Gq&7?mJjap62#7 z-<7Vt?4p&ne|{`FV=n#eX=qHhFJt`79#$}9E)Fbm9HzbV%fz!zJdS7>Z+5erHO@~H zvphw+#qMzl9uqS;ccKM%>P?Yrsvvv8d7S1Ej_=`RdSvX6U6M2Tt zoeiFiSLma~Fn@vlJSKw&KJ+{NuzxDUzMmfnzAY3*f1P4n-ro-^Fw^Uz&?K4PSQjyn9Q%88Q4RJ z_M_T<4xca>&gsiXk8WTtS1WThvkkIYCcR~Fe=pgx86z|^`)^;fFK}?1vXVva*1ba* zzl!s_HZr5zd8X-T5e@wH{3xrC6uXG_<~uOAcl+WP+j_a4@2t|Q#=G5qHyMsz<8jYz zT!Qem7872C^z^)kt0DriRJT#rdw8-R9?a&QSp$DC9@a+Rv>XOFxs!53N4JC4u*jOf ze{QqLT@J?U=X0Sor`xR>cNyPp$yd|K=1Bp*fH6y>`+-HqhC zlVsP$?BL>IvUx9`Ya$DM^W5$y`^jdge_KvznOA>a>>nrk)`ExLS*Fcv@~WG=Q@$o0 z=+ET(#BcuQw77I?ecoNdY_^b^|4O>MZ0x7jBfja~{y5t`58cDw*be80^L~|!ll2plxig#3n&n;}Sr%x2wvm=5sIY90uFU+{vp2?io?l&C z#+yy>JUAB1BYa)2P^1ZiiO$;xP7>^`#$Ho>#&(Ev}w-%dX?1o_U&nJK*PRYLa>rzQ@xAd)Tkh zZYry1ZX(2HS+k4m1>5jOf73ZTrLRY8dh%Tlv-5gt)1@?w35Pj!#Q(g@e`dF{>>?43 z;8}hbjsDz!Q|}jj4*F}ygTNXajS(-j+x0Zq+)w?nxu`bTt}p$HU8CFm^q8mide~ic z-B~tgZ@)Bd@$xO-q2VoOYwf+gY&#D#8=<0`rHS^ohe@SBW75Bu!%o?K__wn)tn5W9 zJA5R?SPw;w=SNeIaF)jpe`)K(@jQH;nuAl!X7YXS4Wnh$@weezZu?hlx(!l$bZ^F0 zxO-gKdflO#W{)|WdClbVw9#=mCYMcBE*{?dUMKb`d#;Da+hwwSoExvQ13yd5pzqLV z_3T~D!6-SUgN1co%+{K7pG>p9W=3nP%Lb>_Iqqz8v$5u3q^+|se|kIHyNkw=w{ceU zk?}k@Lvx+dXj3gtiN9`q?Nkn*bw1Oa?*2Sg%J_0Q*=%6G9C(dY8%eTmuH*UG9$e*f zwR?|ym*=G;Lu=w%!)J82m$NmQ?Db_TClJl9dwlSguW`q%q?eci8b`}IY1 zuJk_c4~lW-f8Y!@5a+gcc7rutL`G(X@;>Yj>Tx!HCC{DVUY5@p zx1#aco2A3IVEsCclIbGjw0|2}r$g^_?;W4Lx}5I9>wLH-M(I3X`_;a?bgsM;ns?4` z{Mh3NZ0V=RvbE36+AMeXJ2UhC^^xon%Ui=KJKX$qf9GivZQ6`^f3RPUUtYg-ciYb7 zb(>2^Tg9{Epwk!A(sjq3)jeU9y`*^tjd(e*MQ1Ey*bGKg+i8P^f16zO?mTkkOjZUT zcOU6h%eozH(OplMrC1zS9?*Dk8frPVw2^tKJQ~SLXG1saU1qC%sm({(truF?v0YRC z-n!O4f7l$S^R@B5JmU_?AzjQ)mqWU{NA7Sx)gr5Mwxed@*Q;A_dpK)++?w}erOAG= zm`>+Xj{KBOoKRM8|DN`u@mUUUizUyZer0z1#uWD~W2_l#IyqLms{tKY%0&pXJkrgR?QXk%r*Jy6q6zeF@ld;DFM*)x7FimB4R;R% zZX&(*uGGcz&}IAOZQ8uYm-IH#uAIf>a7n{Qd2+9KJ3)P2lC{0=Z;r-$f8y7U)dZ*M ze*s;J0w0&`_^63C-|TPsF*rQXRI(db@69ATo=DM|#QD39--Q^bW4c-l9+y#1bJv(} zs&FXuNp<%GAG`M^BY=|k-K*hNz+8|e)aF){1MO5f$zrQ^HO$W&dxlP1dJaaVzLT*W zu1mKg^4-*Cm9HHa!$U@ZM?GDq)nwzXf3ZJhOV^#5qtYW*V9AAl%_BY>T6ww8s^WFJ zr*v)ah&(=4FK4_sm~v@ZHv#(P*nPt$6y}5ItF~lGcZ^+|2U5IexDm_6eb*zNsFp2l=TX z$Kz7pbo-BusTU9XWY>K$-o^Bce~e<&y!YNBeA0R|dX&<4<8b91oz)^<_`@U&y3^h3 zc3BOMdzZ6%6DH%X%`UE&9(~``(ZpDwN$jSb0qxAXY~)HA<=Uz{WGl|gtbTQ^aE&{w zBdmtn(XbxGygq8AFW3bK-D$3eb9H;LdL@fJes_a;o<=$A9M8sSkDD__f8A+bdB?2{ zdoR9>ndJBN`Z2tOVIw0pzIJhNNW0biFkArX>le3voi1}DopwXnkF;n|-J^0fbxI?f zUxsc`ZW5!vGp*S&I=K36ztS%|cev;tU+%4%4CY0)_V3B7PR8+596J485oAXBu8d2% zY+$cVU*$gMqtkgVFN<5Ye{ftmK3tZ*6HW)S-6dGNi(IB!92cGGx?4Q(X~V>{E9N=y z@pU`PW>r!haug>=o%deeB)Xh8)lnK#i!QW%BQ&>0QFKZl=fN5W=QKNx_G<%TzjL`Z z_s-Cm#t^-;zJ1-9ce&)He#Q1;okp2Cwaa&Jyd<>Pji-~4r`vlGf5+Qpsy;n^yOW(m29^3W8_Rn}h4=?-7#^;2s zjGWEG_u705a!EzC&PKyg_qv)Qe(xVk-*{=Y)?HWCD%PUve^~IzwH&jeNwiV_bq(-4 zohO?Pzum3=(A_Q{QMTrn9pTGl>|X}R@5)U0dky!x`}Eu~gWfq|H@&>Co>n%Bb}XxR zB_C{9`WoGw-tuXI-kpqZ)kEuhL(;3jCo{rZS|F|mp3wLr^ zycQ=Xx}e7(Jk6t9G`|ckoKBkkl47&pkJr6)H%lIIf73LYJzic%IP${DE90l`?B?3~ zY`a_8L+`jen6Fth<->77&d2U0drXCWzDRiY_1-3J7F&HwjqbDBWpi!rJLiba&R1~W z6Kq=j{-%5^ChR#i%;l|P9tK^_JRfxAj|_+>9jbfB{pxsnJucq!JvPQ$b3eRkY?WVc z*(td!fBo)N&)&~h_twuRhX^cZ=U)oLAEOLhUcGrXL)g}${ldQNt@WzZ$X>G=^Dwtu zn5Ea*f-UQ5kazZ{bKk5g-8}OPDJ<-uD!fj(v2Fq}9Czp>H0n$DVeLmenD$};MgLA8 zk$1GOmN8xlD+N^St@$lD8qZs|ZhHEmaR&ZAf52BT{gMCd39pGrb-y)>{1I-B)9d&#u9wqY{kYA#dw&!?UPqB;7k~IzbbOY_qpa?SZaLd>bKC6B;f!5nBXu(< ztd%&2yq9S;c~@fU>dQm_dGFPIW`wg~e>w~t+Zis-+f8mqay<{{9qX;1 z@_Lp}=f#a&$DFyH?RttAJGUO)R%Ec)Cg-vooBP|g?%kdq*NpX{x9l9o@A4WNM_Nqa 
z#n`&m++LJ~`XSMd^)Z#rnfvyAz1CklXRsFCZat})ZcNh&S_P-$^x4nehqXDwe{3<_ z``zdAb~&-EzU_9|&=^mQ(1%?>j&fD{0qadltzRuA(ri{fGbTIc5}8XL|dyxoWSvXceYa<=z6MmHXqk8#(SF3#iCV?070 z>2=irLNt9o&P5#Fcg%B*Rd01&e;miBJkDvRt?Oue=a-%y+heQI-_FCOll`N6-ISJ? z+8Q|@)*EJON>0()UMf#v&d7Osq=8h(rD~kut!3Mx|@F0)V z{*2W9W~9F@Z{?tW&Ihv%E{2&Jf#+Ee^@jaoajyHzEd=Ed@yC*v3wDk!+Hl)(!sFt( z+s{R0%;Leu+O3cMqtI;uf0wGy-l474uSLAAqAW^N1wPyAKcSjt_~N@7dgrntJyvuzBx%xnT8s-49)5lLzMLJM}i>$`)(J&_>bqP z{MTbt?)TrPsC-qOSu4&+B%&UBvT_p?I{{#y+XsB4?Nd=u`xF!ae{_wYeJaXfd*%Xe zY_$*oyRv=u$$E80=gopXx=RUQKh#-g=Fd}5S_u!sN3YAz8|QceW4{`;SD=nhUTZta zS2o97Nlx~zm5lsT`_z;6DJKBv@@k(_5>%UIpa4lgw!a6*baIgZtM+*#)ZW;WL%Q7U zme&-(Ubk`+`aI>NeX0onI)Am#Q^DwwE_%l_2)ar~++CH#6|&5pytlUXh#C&y80u2d zVX2;Kg4(B;0H9&}RF)}K4{~WQKAMXfWW3Y zUoEE&pc*_5^d6E73&5a z*JoyZU#9Q>hhvc4=zrxLR|l8CX$FRcgtS2nGbh-boPrx(!Ag!(-ITFyLW<3!+**Rh zG}mr3pwiXuD?cn<0u@1=VqBs%4(Zl=3(W9#x`^Tn4!}bYV-Ocuh_V$X_j{uQW59QT zE}%5{X(HDecyORMH)yZdp)#9+T9ZSp`R}yvhs-er{j7t!{(nRK=ldMk+ka;oebMvx z_&0m2V?O`bn=Y(?tM@0?n+@9YsbiFFcs2u(Io6xC$DkK;P{ZHun;)pm9Lr>HU-}0t zSRg@U$Ktwe24I4|I7OzOm?HHFFf)%QGnCU19Vfyo?#~TRLn2lkfh&I|(ya-?Mj%6w zD>COjE7sJxGJlG=3^34`a$^!(kXQ>VSMOi;{KzZrQT)1I%z!K~*nAr@n+S~}IX7U1 zdQ_&N$%O-5i9lwt#WOQb$nVclbF57>4M9?FiqbYIYJgL881Mr~7MdUec%@x?K+)$j ztRUug0f`krLI44-yM{1pK7D}6AP^`6TLUu&Z`GU3gMYW)v!x(^d@}p7>(V-~m$LJZ z?U4YRzUXJY1c1*oyIrpr{rIQIY`5WW_FXeS;s+{Pzjcs^7gip}?ax1P`uVH@JEW$C z($D+eY0t;3_`JsoW~OoRPsB2jwzJ6gw$+Ro7L`wcl5WIiNT~H-$OBS(P@h6m_aKO) z09OkyuYYWKu`w2LfuT-KZiE(hjw~Yiv=>iF&h2H+s%4{3flr>Ms3Yq@7pPzx(!ko{ z)+RVymO-Bd!1XBTu^Gwk^TNB0R6tc?1COVGpvl)nl31NcBl1)MhDJB?mdLluUI*m+_)aEwk zE0UKk%c1aqy0b#=9mz0ox&*=3UcZZif-wlGg+fkTQCsTv$j+M@^t3C(`j2rj^)e&y|IhQooRwv zZ`s3~htP-&A+fbFG_g%Jq@`3EllwZO(SA2X8 z1*rBxh%A9>dt1r~bAy8v!MtcR7Ju*&dek%EJl}XnoyZWO#0 z;{bVi4J$tJlD?Tj}U`g^uQtZvBwh*s#6zE6*S~vLm-PeWP#PNITC&c@ibB~^YlP&kf#^Xm87 zw2z;~?K)-evI-%dMemX!8Am3Gk7gr1rwG6^FAJ-QC)ulXtsE%RvNe)fp+0XR@EKB$ ztd=LN-zK!57yZVR@_!4FIc-f?7$$ax9mLBBK|a|F<^z`&NJ=nIRMJGiIaBA&6p9Z@ zxG0{Zz6S?&-uEBurhGI_kXZMfA~rELb|zv^IOwG#YS#%X0+KU76oWSYx`qQ+q2e$6 z>wJIkJN)FHtPf_^hf4TFGON2OF$OKhkU1b+3bMrGocNf>UVqmrF-F8UAvUj^PZciR z%)3vhI6eXdfJ?yBxUiH19q0{(^bh9v#WXEuQMIW5S&tUCoZMG>pr6`mLpU1%*PhY1 z3euyo@yIb~3jN^5Y;J|s4)J(zgC5|6IM~>@lMTzTbLqcxv{=6H*Q%VXMaZwLAkF1m zmUa_wT%KYa>VM)f=52o3w<8-GJ_=~sb(BB$iDMAdu}r%BsrT)*sXG776)j$Cn5JH9 z*ZI3X?e`ImZpFC+i`2rrw9uG%N34i+JI}~+023~^~gDq9O-tkYq z`_`8>uQk8i=7Teu?DdN?+B}!Ge*7nHGPO2m`kUiY_4{vL{O0Z-4EbA6*oQCleEgFO ze%62Uq@f)7nmjv2MvnpI)LeMei0p7964z!6+LV#QnTdg>wu}i1P%)FSZwmd)4s}j= z=9|D>JAX)3y+`;UPORjarD;2y`!3P zBypySHH*}@fEIS3O;Jx)B-hX@#HDCPr z#hE|Y?rTqF@1(7#|6s*baRgQKwQucGxUj9SEgo#whpM=1r#@>=C1|d{ia!4By5=0_5Ofiylia0N)4P863o|*(L zXMaKJnN8^-$xNHa%o61WR@#}GOwJJ?H)LHRhYu!FIrGBtUZ8$+3Awdv=l)8dpB+8QFQd$Ssa&jZEL7$pXQH0_<31WZN#tG=%w~nds z5>-;rjb)#@7JuU;QyAofn~NWOps+mqhkq9%K|gELr09PGE8sueb?_fN^cUX} z_4oXTgS2_*lYd%n+WO=lec{V(sU!~9bUZDpH4Wnx24g|PEGJwBR-QfVhCdxCToO=% zlNcjj`g83 z8PHAAtCpU~xXUZEJ&&*wkTF6*y+(ptMCdl|3v*_+_DkZp%Ise9B0N?)6j64|gcpO! 
zArwIGG4!Xn)z82;(}DlyXSew9GQ}l-`TsA!O#hQt_CNR{{^Vrw%OC9)yMG_pPwcB> z(Fhp{3c#X5Tmr-*i;FvZ(}|T!MndXPSzQ7%>$oMyX2EXfQ#Idl_8`JYxeyN;$b%)S zrGrwH(_934VgpCx2(Fk@tJSTdvU1}Zp+aOdET0tj$ds1{^n#ULG@2c_&*T&`eDaYc z^Rb)DBhQBwLFvH}C3TX^)_?i%7FX4+msIgcm7n#jmbZ|?Etsmif7?Q7Sbu8N`-{sz z`ET%Bo7HQLic$8_DBt}2$*sz#Qj8jFZo(bbNLV;XS0Jb!>+#-lQ(-fN35YOb& z#)v*p54rrNK(g&(i(GyL6&4~3j~tbsDFtaTN1q`VwF2Wy*rY$s7sa5=y+2TbsAu>d3K+0suZ3&fSEza!MkhmkiA5TBvW=-{G}JD zTKv)aUO&F_`ToCkzRhR!ME~%-h1F!Q7MBM~JNo5aC-;@LH5~RJJf0=Th3_q;#2^~R zxj3QR*OPa>=OED>rGG~!s+cvh+UcFYAT}BB$XcX3L%IS770u+Z(s>=7}lY4 z%$u3YGti&}6W2Y+wN+k@CMr9K-vdA3$u%lH;I2g=rX~pP6udO?vs3iwc*?m)csV04 zel~;qN4IgtT?5=2E*uy~MCms3h+iE}U*0?W)mN-f{%PanAAfE4@L$;IhcmP_86vLY z?JsQe(YyUSq!gDn2xb#RiXcx@M~d1n`I!|m=C8P!6|u7yWa{Fbm*mxc7RL<{M0bL0 z$9V1Axp>(Ks%g%=x{>KL5GF`!qM|HZTiPBbb=b=(jbvy^lAdzi+gQg_9x$6l7Ph27 zSySzuAkEe`B!8Z$7MmokKXy?Mbhq}fkzh*LDZAK#+K^BG!G-MblXp||zxl?-aBzCD z@)TjgGWXEj|AT)ZyGXMsG6Im~;DnhPl=~l?1Ta{e?wtQ#&rMyA6VU(bdYr#_+_zq& zXZP^Ui99{180aE-@`7{$yfPh}8ns!Txu}+NsXq|UTz?E{$Z~B>MCsTs@?Y26FT^Mg zsxmGG_c*~w)UFA}jfJ`c1^ejl9Nl4%qJo15C{{dYjwNq-4*A4jPCq)l)m~pgKK$5_ zON(<93d^;E<%Bg^g23`1E)$DeJ`O7iTnj955PyCa0K!x)AiUyJ2S^PI)z3&Q#-PIg z;KhTA<$wRg^=(Z~tR`;bdoK9vlP7;Qrys2Otr>0ne{1%K!;&w){Ae2Ox*)U31P-=F zAQ?voayKMwoi>6QPPD$V`v~BLp)ar6?@4t0g5~ z0O3vm3tQ-s2gVWyM9{F?85Y^beU}GY#*GuS{jIXw6{Y}h0v3Z*)#s#_c&Kje>f`h; z4L!5bM_(U&?3e!7ujNF4Y}nd=`Gbe(D?R_6KfX07`Nj2WeLl77>=qy1WCi?3pEYNW ze}AB1PUitz1(+;h+d(;kgA2?7o-JbCL)i)<#L3Ju>F{QkG2@S;A0VG$ew}0H`O$H{ z5Y;v{oB0aU*b>y6ONtOL&-QEgup)kNUgYf{ZNx-%g3Kz`AZAn0R)I~s^9h$oP(pyu z>@Y7EL0S0kJc_K~fPp094cc;T|)#@M4MT`4?Go7FT7V2OBUBH$i*8Gmae zDB1#>QmFSJh~b4L!9rk=;$-#7Vc8fyEiTikC2AtX4hgci%~bPw96$jSBteSDgRBTa zj3U7>oD9=(5U zZ+i3JX-E#d_{%5XzkP=<#4Mj9e$Z{zFe8F~ZIzlJ2(kvitb^qdsUejIthX^YrGL!qgghNt zW8i#>5G+7a%)r%miYMm&hBji{y`&ve&a7P7m9yDO00J*keI6ff21H7qf6HU5&wuLt zPyYPIW7a_t?K6e3|HG#4zxe#a4V&yoFHS6=AME9TE0c zH%{G{X_Fv$GXrF4BB4T<5)9cgSsD!bZtA?WKC=QZ>hl>wa*qOi4}$t=KJJ24edm;z z-@fx)h3bIgn}D+#!@$${;pK9s>fYDyKDxw@AAR!VFNZ@S`{1lF{eSf{_r*-e`7dXg zi?+XYYJDq|ca`H2qz+EMF}T0i;o_dhzP@~TKWK@jSTnI~^AmQ3z6P1iag!3v)0Gnp_jBNVVpexk z#-1g_919KXa1^?CeevzMgaQN7`U5^I} zYus|hH~Y~te}Cc}e~;|2~Ov#pD&J(31NKt?mcJq*;uChWHOawbiz zYLpFc(r`hr`_U$q9^xo_{n*|*RIU}MxeH+e=jP542!HdTMMR2KjCP;3g#)m0E)yGS zvHHQ{ksPs{_07p+TQwn_D^hmwa&qp*BxlXfA|{C$x)W2k>Em@mR)M6Ugh8% zE=V&j9)A`DWSwwj!(7ZvP&t8O7J+RwCEb_li=tJYgxSOt+&4`z1X)uMj82>3C2eSC zN@>%^(16DdD&aaSWn#XEF-5aYX66d>ATy#nM^bF^sMD3q0zqvAI=5n!sMpC+bVM(E zN2O8NQmFZ>zw18&o=yo878nwvkhcAbc5`E=#(y_~M#|oy{Q7xLLmz*&-cJr_IWGI+ z%+@YHGdXJB=bNkNf+~*n{&20ok7d|0|F?ht$?8)vlSBU#{+nM@0POsQ*w-G7Mz3KkI{IoI)!ujk?K*lw|92h#>M?!%U)lGEw|(no ztMBli*{=WUP1T?>TGN8#RiM>D(A}H2yw2> zMT%@7A^h=8tZZd|bMKEWT04SHl_vPHqdKnD*@sF8trc!1MCp1XD#;%`=7S9!_SIy* z{i3kdj{l1FuRp6YK4L1zQv3$|3ZNgN|d;wQ{D}H&9p`pT*zw1gB#`ygmb@^yt zG87Pbb#VOn^cg<5{Vlu)IQpm0Ae62NufO!~Prg&Lbcimj_kZ>rGv{ZPPuZZw_mKYf z?*7EycNVjtEq?gZNBST5tj(S2>wkwEsT{0&NGhMw*H;t!e4gnaE`Vi95sxjLlg5GS zyc`q}6r})JdBCf}vM!lj@HxoXBL$oJhZvzI<}OrtDoCyLIu!&YNY0VXqaPn+5kWy2 ztJfWnZ45{3WNgzyryz)hOxn|(^}m0&1uSyzi}LSlkJ2Ia@? 
zdX#XKEH8u*XxNiebOg4@T~_IWCs_zP-r)(UIg*|H#Kx;r4D7(9_OgK`5S7;ir`J_RR zI(7yxZ1p9({If^*ZnT-Ban?!@>jzV3!)`s1pnrdf=&o~b#2QIl)jLMz1Q^9NqXw#T z=AoJdDzOp!GCJ)}n0a*%Y-@(8TJR@a2XC%cvJ8TV%4Mll_?Wx1+SiAP>z(9pkY?hv zTikMH`H9+gZVNE}M8H-Tmz233!@sy|LWm^0%PFO>oTq8aIV7XgL^hnlASUC8+ASze z=|X>6J0<1YjA+HiA2}(Td;EU#fwD(B@}TdkWd_STHkBFOO*iP^#N8-Y$c~*4uWtWc zNdQER7;lFhMrd}HQ5y+E`wPWaJt^G!Euy?IT1VYo6-*@`JT*{SXeQlfM7MWuSHSQF zhF6y_TbL)=b`^9ko|cCuxM}#kOwZ~%TIqjpH*+t8$|cGm;t(VVhmB>@L!L%BfY3Bk zMWTD`P>#rh(eP@3Q%YykCf^`rokreo^BKYAPEx_yq39RBY{hs^8s3Jc8+VXcH&j9N zWS%Dx(~iRb{*@7~hXGYFz9+5&;)7J^%DA-~q7LL5i^p?xO{)Sh4o39DOiZ=r%MpM0 zj`L;soF)x9>A7=(;4n)iI}w;2Fj(P96hRb=60ywlr_H)*hxvAlowHM;QA4L5~3m0R>WC4IrLF@WP&LGXlshp+?sV%5^>n9Dfyv*_< zOa%)WgdRp)Z0$u60m>p#_t2Fong5;~!4y zNDK+7i@SKbevh#Y;VjI5iJm6h=kHXki1X#%oS4_5o2f7=la1vX1uSI2}dFX*G zg4H^B6~o=fmim{u>hg4xWL+HFaaeZUQcuv5R?B;>q4x^)h)M>P#xbSw=#77ax4@4O z%~i8)_?r)!3&zYbRR+_+Wh^zOT>WYwze5f2QLxXt@WN0PEqUh-=jExYW7hd|J5qg; zU0hA|*nXdLV6}C>65Ens|5Asx%Iheq?9(CfNVMVY*@~H;?`qJR`yA%UC~=Lj9z*c6x`WT+ zEHqH~AHp%3G;0YW!8qYOPVX{mjS()Ku|Kn^W@^978GY2Z)L?0(n6Aje{%*jC2g9kD0&f1Il38CXQ60C!}9_ zkf+uEO!h%l*dc#B=}?CN{UGe9{OTylT9s|Y1DiAG)2@lOIc~`!XL#bZR8l}3csS*M z0e|)qj^pZT&MLfcO~TG_5mxBT`{*@I*ey|fec=4#-&28zXIqo1EWCR~Z;RnJ- zIOGz0_LiS8W-wwpFe4ga9i!#f-FKa{F3OhVy*13!XP*p&mg*rl5lInhJXZYioUGcL z9{}eAq0y^YQ}3w>jG37`V4A%vP$TxIyCU2bsmz1}ct^9kn-4x*CH@QF^5<6B?QuPT9CB8Yb z?c+K^_t~;FkR!o1>|eUciFX*6&#p|O6#n?M))eKQf+E1MVGynE*`@TE@vpm%&N)Hg12BDfNFYE|AobRWg@_GrIUSvj-v*i1Fbk zX9GORNd{(#d9nEP$5JvIVZ7;Gq*~1mBmZX42d3`_ciUQFAgqu?u7c+EQoUT86hs3# z2uWN68)j9(&eQA)P@`JQiEKh*e6z|6b zEd9dPQj&Jp82be02kc+f==hMdzHkOs0`l}cQ@lD`z(G_mgfV^37Y+v0CTga_WZrMy?`W$ z(Kxv8+=iwzz$bNwl0-X@vRtq1VvZTHj>ACDFZ{_ROyK2%du`Ygm{We2{DfIv91DNF z1^Y|Xec%p?Y9JO7i)&X51J@n$*F84LYmNhvq#nv9HfCw_Tiy5FImP}Jxvi)({&7~9 z`Rt3m`HZ~z`wk0-gx2=5NHV1}X1FAJMNN((;T^?M{QG4p!n9l0)%c6NYDiMMxNMO{ z&6blOHI{wNOl23{vq)z8jKX^4W*UDbC9B{eT3;f3m~{4_`c1|mn|E)9a4s%3Sq%l@ zuFITBb)W4RxDZ~S7<7$6aD!TipPER&jULwd@O`~O*97MwWrFEO9#toAhKk*=p-QJ0 zk18t*OxH!U^FNV$R?9jipPN!P4aL+ok;iQPLnzKXj2sexSa?dr1jRE4{G=nCO>G}DP%o~C$DQ{D~*xJBOi-wyGoRxP^d4P=m zzSx_t{AW8sO19l0{`BNf0?Y+%*!iM)V>!;JNE&lnxaKI9F6|tVYH$v@1GrjX%;35yvNT#HL*eRF1WnCJa+`+Pg)|T@fNeXa)2DKd3G2ElM!^ zbhTedihsdVTL=%rvPTX9H@7{4ou?nBV9Q5_Rvl?D$lgJSCz8V&D1h+7AZO}^t1%Wb&*PL2baE1kzIF;$+o*3W+}ldEyH_0Qvy>gIOi zeQu#FtuMLjlr2?5QoU_unvF^aC(Kk*fJPs27wk;Yoqjed|Hl3zL&6&aCpbOwZDF?b z1L?DIO=Kg-{LNySj34}4iR*CZdTn%qfvT-6;JV&8PeK*CRu^@5!hJPTv)VrV-cPFT zUYV~g1@wLIn^1q~jX5!^L#mFm063FE`5f>B2&@3W+>6sYNC4MsL)hsdJ^5h#3*a{B zN7eV*sH3O#xKWw%)0J{22<#=Zt9rBkSFZ68SKqLY%silpfFN-2q0eJvPHMYffNME( z*>@&s-$5Q@%~M#X;|P_n#kjxV0TPsRMtI)B0>p^^O_igt{PO;|x{L z0^CZzFf-4g5ufkHuWvbpIbx|Jgsm99_`ms9L#o=8XqsT0$M6H}17J*EE{4?)$S>;G zFKPlKAbu!r&VSp8X`)5fE@T6i%=lpAE1d1stQ|uZl)@llf>Z1A`S&o58GwJnmUVT5iueYq5RPeas5Ix+_tn@Fk^&%8e=t#HHk+huZ3MO=gd7mJSFR+% zboli~;Z@D^qnjd6?@JXp9~aQ?hzHgKfcs{hntfBCH2Y;y_NX>vC{(T#wQ!$ftQt&Z zPgvU*^WEwOV&5gy!jp|=Z*m$}P}LH)_t4s|3;TZ~v)BFzqy1cNXzbQLCJC6h!54%| z2plG~Ob6WTpGYG_9>w%g$__P`B$)!}SIlfQY6bmbi*MG#iJ(UcGRSg+*{!+F?CtNe zQPnHnZyCRbBEq<0H9wx>4E(p>k=SseoEXth807T27qbyH zA({qXYV0opc(31X^F(d*J=E{%cqkHa+6g@!gw^$_8h-hXMOL2N^W_Z&=T;i6R`NrV zrBHR5JI@$Gxgt919F^O-j$&YigNc7=U*}~VAA{|bN9RhMpIeK3&~e{Xk$bkXHRc94 zXpx{iV|(|MxNy4CQg>5H5k|VVP7;EccSoN~>t@^9sSZL$$ie6L1c%TVRhShQiyW?< zH_h9RdD9A-wA749ioZr?EIe%*xC-yl%(#q2tpRsIcG#^d}jL@mt@1#v*AWiwbJu=k;tIMhHH|soi%4b?JF(b z!n6IuTH>-tV>L`8R?xfJfntp>Y#l4iI` z5RoW;8;75ZJkqcd%^lXw!ro#EL7jx^j}BTM6v-sjpQB0uaDC$l!&(cFW za6XqjH~l;))_x7{bsFiy#3iQS82Z`ZjzVeG?~#&0Jy2)SJB1xiNThHCy#$KzST@~0S!331|!0F6d&y9~P#J_DJOquk{$>zqVu<`@Ly{HJ7x|7%(;GB8Zc 
zHe9H11YR03JMFHnu_Ou_j=K`8d5@A43|r@ZtJXe{)ARO_SDCjt-;v^#8S5^A9 zrImJtLVlAU97%sWTMkpq@6YfZVrrTsFhSB(IgtZFJf+3<wND`3^!MY#ad1%ARQ2KlGF-^Ei(_cIZ9OOO>!P;va+Su9o zEdD@@dE|fPokP_|01RGsw+b@FyvVXiua2wbZN@rDzny|?@GZM-ri9K${DzJ~Bk4jO z9dY73>{J1ARsb0Cn5*+~{9GF~d=&&JWAzYW2o}hpEQ0T3>+jC8WnwBxBe(Lw8>=E!Y z_d#M0EYKS!O*C?DfSn?SJ&BC!P|H&uCkhAA%x_Ho+o#>r;2&o*naxjVv56*a)*v(Q z7KNuN;}Ze6qSoj|PhB3obiUY;T%soF8(76BUa`TQHUVFtm$F3o&7;bA3WKZdv_}^< zOA3Er1#Nh%6zwlI#_{Fj4d7rcwQu0TO*Na%h4YA^f#ExvNP|TegXODq&;!`u4o`{n z0ECPjC*oY-VpzPL5a&RJ70Xx-*Iva9ChyzY-N$xSLpe2oQmcuvZ+JersL*BCe|>$ zNsnIjz0c?fw)|Z!*uGwLKo5B`>^a67X=>){k*0sYW6dFiR)xpa#tpIBH#KKG)A@fo zclD<0e)C&C)lAQ&3_b}MKht>0#PJmXx?chAdA*c4QyY{CR9rXE?Zo7%4D&);`sxV` zMY~ub0Dr!AG_n+)hQ}AY`osW$8u-??v~_D*cXo{^N;u){8{_$Ey`~I$U4fAbNU^i z^4`pfPlXZ_ch+HL+njt;sTR^9I`Bdy-Emubf=Po84nj3|dVsSJR_r{3>Ijyw5lMAV zL?-_Y`$`~#%f+z0O72VeJ;T$wBZ$4L23Wn~c>I`Dr6)_CUI z{&M-mJivWUYFx|Q$R#~C|KhDl?L06_)<6*KriwKLCm|5j70gXDcLD-RA~`T_p6ef$ z7e3@1d+J1B^d%H9?=qlyt|73#q)? zH~!=f2#||iRAiiZxMp5<4Q>uN^7=u;UcOOib?6*ov@-1y*lR5eTh1n?4FSv0`4(U; zU84cVQwa}n$-YArz?Skdj(d`9e0I?_gH`9wvaSM=dEb%g2B@v}nEHRir;aQ5AGWq- zrS}CZpgW40J&sqv#ba~Fv`yMZy-VbppI3at_mXR7=3t8n;==AWd~#%btqQeupBWdzF>25cp2rCbgy95iq3Z21857wggwuC(@E{ipMb>A|-Jh=p zt!bt3CqPo(XzW*}1v#m{!2#TaiIFqkG$H`sx;EQjgfe#vyu@d%QN@xK6w7886z_=! zKY(J0$)gjSzIF?=b`giyvU3N!!{vJ%3_C4nZbc2pMit1`3%h@|*Q)a$@_nPUk%bU@ ztP$hHqEo*JzmW#!7%>DFB!i>;=1jrZC&gYgAoaGZ5n91Af~bLv6*iR4FQKXfilz_M zC$7)^e7Hr{lM!L=Ln}*5rMe*R;tDYRQD^BZtE5 z?QP%89>FON>{Wkq*Vz8?7Uk!x8MXy#V&Rx`h`PI5N}~DelPPG%KM;&SNDF>ok|z+s z{{R#B4m}vdr>P#!YSRMvbTj_oChw63y@OU<<8b5TBH6^^6iV@7S=0s?8*H3?Z3*Z-kx1*u5N4||Xx0N^y z#{drAFQkxVqISx)sb#@urSJn+R@MqbXX!v*govXFFS)$#DgB(bN&=B>CE?H7pT5I1 z)rRODz3_xIP9w!BP-kWToIvIWeLC&M7eClNc(Ph&Zxpel$$MOSfH^t4iAJP%@T^ok zf5^>aA1Hr%XFNzG_Psvb_5(%wWOUmeBrn6`46}UZQ}~0bMX)+|nc_^3Dp9=Rl~@G@ zUJ_JapkVBbDhTCl?Y+|3iN@zjG$4Ai$>3304IfZcv2S;5>@7(GsM~U{eMR7F9C#QV^{o&eFL;T_qI&uOI|Ut39jiju9Cht&4wjpypF-Y$6F7qz@~zn!ZU=@9m{u zUl;`1B6hmbO$`*7D{Hi!EEyC8hAz!_(()8l9~m|Q`R!Fd6#InesGDF!oXJL+P%SnA^ z7?ywGgp0sT>KGUFz-rJ|Fh$XFnt5*U-QfL`oEIl~jBie%Vk+?XLMgQ24i}Oe+1#wR z47u){iSk+el6~CS@kM1_%6M`h1!rxN8pNmm7K3(ZQLFS=(DKUTQ8>i<`|Au*~J%x+bPy=Ti_C4^3XsR|TeuyVN zS{1oZPE)TB*#Wd`YP~{Xe6MftnzXqyOK^($+cGkA26Q+=(&1B>AlL_6NZ|FU@H>Bf z6y?*4!0nI~s(45=br?D10Tp!hJxn0AjI%m00T-! 
zTB5vFy^(&g1|D|_5avwG=sYn383BLpky-k&SwMkHs!6Irl?)=gQJG$~nj60H3M5w{ zoB?%1n}baK^cDYfDq;v8&Qr+q(Mg~E<}@NsfPXOG<5R+~k|USC%4Gxsb%$y67rBAT z{p{OYV{Q1eqLnINBg{!kRYre>_D#=J+%@D}eq!y>TS_KD5ZemMtMWHg z!6hqVss1EgW;r?cjas{GOR4|OE#rgbsWs-mjI z+=^zgG{(RTt8kBn8))d`c&!azctcgg_g@Z@LcLW>S~_{gj=})nlJiK4-&a(Aor-sh z-~zqf^i+inLo`HQ9rb^Q^RM^xp)$*?UXSGU0B-T8b$ZMuKZ3_j?>)_ep5y_L#ckdt zxqd$VwB`iFMH1DQQ@6X%4fEz4`707Rq;ydU&mpEGf=nAoK-enROd5~0f7eQFE8eMLb7(@VtEo3FK5zX5sWgV7KI%@l z*8SZ7wq!P9gK&R;I=?Y)H*o$d?Sh5;ohgBOHb){;^8vQGb&&46Y6VEBaS36Hc5f6L zwUE!kjBHF3iI>c#K7!kmK8CF-#T06a-;oU`zK!599s30@D!m5-Z@UudH@^{yY@eU6EnSff z(wmO!J&@%536HvCDP3Q&ay^`0rXadi=n(-aa)m7?oVY>4( z(x89f!KkK#;%5tq@3|rGkesKff<@1V$jsCivAw4N9l%e;lawJpU#abu>B5OPcaWcS znh}&ML3_NkPQllJV;s-twB@D& zR64``?s%f}Oq(cko1DF)jJ&g)Lnd5^sRQm>l^EUkjDqvt8MbZbu)#X{7A^Vw*iU~n z5}UaOHN2BUlh;9im^$@HIp88XJ+QHf;@~lF_zqx|@D|y_?~QYiI%c73$n`hXeuC=) zDgXT$j224mbSkenlH2hdeC0cdU6TqKrpee`t*E^3I-(463hab{DR}c+=H>i4Y;W<( zAG%-8}_@QY~jiy=*Z6nI6({hT@hkqQC`h}dD-$0d zGRZ@m=Z!~-dwMCM*qboxX<5 zWQHo_pf)CN;nuC5k> z7XyabEhto7#*;;b)MSm7ITf1K<%B`#}wF(yfp5N4hf$7N^Hm5!v zu=3O(t{$ekmr#wFZy;32l_L?_`60)`Tchg;fN^FBkun?NCDMNqKafBRRJnv1i#s5U zX%z=iZn8$A(m@=LSD>2Giv_G&R!P;EPQYnR`US2zcwJxEj3{I9Or!Wl8jHqLrmh}O zq)f~>r8De#vUaE4LDi)NIGGy$`27$qdobHF)G<4Du4a|Z^x<&u&}TzQSzl9YK#_Sz2?216PQe9gO z|B+Hfm2lC~G!vPq7&t6O-Ez%`6E0UYPa8jpnLnI)czA#G7AFrYpdT&iWkUOh=$jma#c32SDCN%2%WQsNYrry8y;n^1?%M z)`a;t-Wz6+KLZ$Y0_y_8q)V{(AY0&E63G_ zOaz#Nn*1xCsC2H=%gVkl_NtQ1{ zYvJ9$pD4~S2VmTZD1h}4v-T@O)6vO}QrU)>wH*)kGk@y=^#l$Zei~{DE6E^vARi z?oe6oGcG-dzt?6WB72KPv@G_Phj5qta}-Dc^r#cHT-24SV2Xi)*mXb1>O2)SQ165smg)IoR(e-g*8+M-HpGzW!B6qYq<8 zH7;ug%sm}q_E!knN?RNRs$k^Aq_Xfz%h@$6x>P8MlK9GPAt&3Qs%UIQ%}SjDkYK2K zZV;8UY+CuErbA7BjWrFj&D_YaB%c%Fz2cKjR}}Z$xS)Tf zupVORdYMTmFSG1p&iA-j56=}z{J`t7M@TGK*xHXPrz!~H#p@rNS(k_C(!Iy8!N9?^ z(;s_KY!MtUuN+FG)L?&%lhifFSWU;U!vX(?FG4rKMKz(f$X&fCa(yI< z+AmaxSM-)p%HXUE7r04>>H3ApI9j6P&ZfJ3BgtUED zEzg@D2kl$-9J%#qUeL^eHfVdRSyok2D*25kdIuIRrbF8fXDcgRn6<%D&a;0RB>7CI z35LD4ysAH})x6+P-hI+vZeFy?Wx~*pVNf1Si_z3~sGR=c^!u@tMScX4n-BXL`7t;E zHNAMe7DQ(e6J?22G58aRT*iD9mo$RA45=^7>iW%0=Q3V-DDsd< zf{~WC2YPDhnOqJbS%QgaU9Nw;DNVdKqJ6=p`t)+GWg{CR`9WW49L&$dOq!mqe6s6Z z&+>tktR!tJC_D*fW-PYB(n5=5SWdzATK9KD9|G08Dg$#n+4#wWOB%L{YouF%$2AJK zEbTHu(x;dikfOUTor;RZl@EUj?PRkbW|BuJ5s4y~nl_Dp$tbTQ(Y8)c`* zEc0y+;;})QINXMXWlVpF7jee09m!OLv$OPQ>rTUCl$_rB{3=9}Tu3WuAV+dVsF#;_ zT8y`8vVaNAv_Q;!q9Ll3Uxh^+8O}=X(T|93ceX6v!Z9CJH&fI2D)VfUAj~&8E$Mo) zX8?;#{n%p_d~z*Knp8-WqxiQL`iNOTf`sY@ttlg!BNn?q1Lc46vy!~UC%II?AabKj zaxD&vw17Ed5jn~Ebe6zpt+x!%Hj%P!q<)hCCBH=p=X(T|$ z)Hm4WK?PvMN`GgZfmHpyA0|OYqjYbculrk1Saw3W+&=&_{^7tWv(}$7qvq|Ih#Y>)+~m8?)m-{{5co8u|Bs{pYz2 z|50W~_Fwfp9pIn!zw>Ob2<$&gh?d-jVVS=qHxR>M{}g|r;AR+y`F|2?2QT$93_Ih0 zhobRkYuudHtjIi)m|+;H|8vGk4Z|?7GonDrBxnjL48s)bpOGQ{vZzrH$uGx3Di4NX zxA4!?b~6lv8Oe{NYpHAt^q>CifAzV4+W+T#Cq*7S{VN6z!$=y$Fx)zc!P9uR~q)zxVD*rfjREa69OoE|`VVHFOYhOVah7n4W87+I* zbHr>JhV7-l`rThw6yO%mFhk(BlK-yP#{SPA!+tmEJnf8!@B|J;G`FVlb3;chgkGS~gQ!v>?X$R?g|LtSwg zKebu9v|CW&O*`H)dSSt>mhY`}OebV0&!K2(`o&_JK4mWfo(>;_Rp(8}wy8~Xh(J6} z%cZrjG(cRg{{pZB$#5en$qB`0lFv5G;K-I}mDcnf%_)30#kRwYjR_vzY>u%Fk!EA* zqnv*PE6m6dv>9rx-OT3j{T#>t^u=j0QIEHuT$WpxhQ`WWF&}ik5PNouE3O)LLBQnn zr4CjUKcv*`Tss40)j)$Xj36SQL8fZ0LBy@PHCdfFsu2c6@X33MzbSML*-lG)T6H>w zhEsH$4GC(NeyFYla@(wsYdcHJ>p-suvtJ%+i@#QpJ08KU4kKdkV4-R9ru;_{t#YjD@fR~kl~%~Ch31JN~mh1 z=rR*FI*pOp*2K^8chjTm_nlZYDQ}sRNTH;u<9opR>zJKRn=6^+A1wLfnApaW+^>?t zXk~&LEU{MV?exwMRm=dt-;m6aWE8Z=RaE-&Nh|Iqub;K+U6*pI3^c;t2^=t{mFc%% z9FWI}ZlL+BK1<4ODz8pD@6vxUCp-s68B|b6LQ>rL#?K9GO{|*4DdT`=&1ji|TRtpe 
zL-YCYYyeNmm}cAU;GNQg=;&-k(K2dCd@}k4cbgHQIYi<1nm95H_43K0==}vgTK@~P>R%D(<#siWGa70nA=gX{r*8s z$Cwcrd|S)BmeRWB6ZApdW@ITjmkUSkTrR!WD3(K%sak?)N~;zemzn5`GH@`Kfq+*# z4GqY|M1Oqnt#Pl?orAIJgBmay9TK#;XErLAIk@3;H7Wz)P{EMLziRab=QxR=^vyE8f@3*2VR}vgUh*Es!~Ydvt$jG)t|oN$H;;`m_@P%B@;R z{KO*OH4(F<24~J@M~wtN?R%hStYG#UCz@gedpr}DdlAYJb6`z5x$_ibZtYyA;pL&@ znL!I>$C$0;opFP%n0Ds5B_t|`7%z;_29-DAAb=j!DA6EIfE*}UwpP0a!Ri?bo1w-J zRWYpw;HH0H1MGt_={5dD@8K=sE1DEBPE*AN;Wt)}wZ5;Vu&n&E87-QzfEuiv*(JVz zXC}?R&IRA3(iP1&YF0JzsgR+oVzHRCDr1ZsyK6v_dNiac;!)j`HX+|{Y+y19`R#2e z!N3KXcRD0`UU*3psdgrSN9*^cAunZ=>v#4!A+xLr0sd&@SrnURaNMS&14l%vWcsN{kF>ypzgzg z<)yCZn8^u&`umM4lvwzr>k`|zqOHOGEJTfNLHIFn(uc~EG+|P;DnpU`ik12~p*T@` z?H+$`JVC2M{w$_iA#*(DnuQ~!yv1E1EvPAz)%a@I!je--TLrESfJPkMkf`QsbcdNi zgb8V_@J%xR^TU-|8MqT3YypHbN0pe(C*}I=S9rk`xp{vUQg8BTV&2 zr^r-^O{cq8IjUF|PHLLn!Bjz@%W-I0^vhP+d=$7pa&VL;xKhI)zwUehML@d0SNoLL zHO3#Q(Jh|uo#Y08vmv>Cd1~yFjosq>i3+9p!F5Nt$O#uis%>aX@H$5};Ef?3vG%XsHe^6O(bbDwKjU9s$o(QgI(s=Tv+s{ki$S|Tq2reEjCHEfl(wlJwsSTH5goZSRFNg7P zq0{s5`SiDZQxHO+DH)a5G8h4*Ob0;4r27$n=cOH0K}{(1gI-AsdGM10 zlO0j4VM@RbLzIi^5FUSJfsbD72fDQ7Z3ZkMOdP+tFT6YJf*qZ0~1Btih|SP=ijH8Ujth(ra#cJ--SH$W(#^vOfoPH1c?8%ANmz z=Gg~A1P-o>o5TL|&+xu2Do(GWH<$S&f^qa8ka(cs{z=R0 zvaG7^>ZnqmrpN; z5W;8@9z`*DDFY6#a<4y1dCK8cJL;zxcuZuye9k&>qi3A9Tj!wG)3`s2KJz{I@kJ3M z6JP>X&iVVbTzV6<(Awn9_Z)3=jIVt=TQ`h40UDQ7s^7Hc6lLhRu@X-GZ{^LG)T|~`D$YO9n zTSD-I;?!^du0I^}cj7EKF65MpjGE%3#_@yg2wxXC)Tp z3f67eenx-<@aS#y&fl|Qs!?#SH=c#`2eI!18gy((aNU7qNEwTMPI{lG{i$23RJScv zZHKih09Cea%z7sJgX`iEf`T_k{B-`&LD08zE0{*dF8n6o+iQva$LdYPu9}<@htBX5 zej>C+|0>5=a22nAwGnjg?i?QNpgqWgfulwjdM$V+c0P*jNS{Uqt2v-}5k!n9XB^{s zTexhKx^{}!#XcNRLsAcf(79sxq%I4JG$YfyC0t5nL7J5+7eMPAq*}-q^fv}*&?FI= zRFu$&T-4&7x-Nc`XBSslM=Aggvbm#D!lQE!x*)HzsSE6XZSM*xJn0G^N@Zl?2|Jhm z=?Uv#fLE@6w5g&d+GmY%y(uRIl0 zjGrM}k^EAoj9}UW!b-(DB-ILg>KN*#YVnt3tjg)-busvsxez|#D(KK_BbEtln1r;f z-#&fqyhY-Fj`RX{Y?Hf^&eQF5phgJmS?EuDqCAd%ls2mVTz(N+5Kv0)FsaN4pu#;n zR!_qy=DaVX&U57S)~!PA*ho5aBVvqS>Ok-@cPYDlDGf8V&_9&yOn;u~3rmve=DxZ( zyZ0+K%(f>kpE(G3iH=Uo{=#CV>j5IJ^B_>X9y24gfa_B`EMi4te93oadE~HvW8~ui|Soz)lVFxCEl#l!FfI6 zX6Fe6MdnuuwS*yK4IMbDs8>49dXJpoMt9HUw38@D8T5x3g+v&o= zw_6EU`4?#bmNCH7V|+ZKQp0UNxRg9@LhU4fpA@`8q&Uux)Wi3g`YsYY1MKZ;OAuZ( z^e*w%+2KRyjdMSKz~y|XXj&H&!$ZaUa+05vu+1V_V5J~ZC30Ka*ZB7}nedw8q2|c+ z`w?NpHN4Qk8kgze;fE>|C|l4BH6F zaki|5oBU&kjoLAN7&D7#u&!+KnD`)9S49T_lBh9O!jnc^c}C`}D;gB^R#Wy6MN((q zUOV>OPPfkko9>xga|=6bEI(S;S5Q#V5TslBOFAmR}=qTnS?g%5r<; zLGhPrZX)p$y2jWyF*F{2$}0(fMnr-wbkR$Kk18>Y0qlMFvO^kb1{)fS&&Ew*JNQd} z_ejxmBkV6TvEGkWp#hv@h``vl!@)s^4HmsAYy2*3uQb|O{H}}Rk2@Yr#pgLcYgWe$A3s>D4@1!hx7_P?2z7Lw$P66O6X`XzS{~qMPg@b%{zbv5N zeU)*!FN3J(@Ru2ywI6fI- zva~dl7)38dL(iTU+q^hj-VeLaq=r2{g|vQ7DVI9sHtRfYOb6Ly+(>t*beN?SSgeKZ z%P8Iv;&0Y8fng49QV3u{f!Riebx}N}{+>V3i=t%dG-zLta65{BhyJN3%ZDrNO2I}4 zgg=OIEqVTN2UNDusdDeKa*W9@l?y`D#Yho&MG$GzT0eP-h%NQ3Rb@pc;D}lwghH}p z#+>Tc@W}pmH*mrK0#@*bR2oXHt;imc*^u}Z+kDQ1hN#JVDU-AhxZxoQR)d@rf{D`s zZ~_vB$C6%FML+p}csB_5nI=TeUr=%jZEwi3nYI$RvJIguWd_akh?g(6Vl2Yf^qa8!AQj;klzySN1ISwldqpD$5=H=^H!s4gjS&+dSYIc;~; zh|hr2kV#C$GTl2t*5G6jI&QhMBQv9_n;%w5QKyUT%;69*0tnD8H}ius9kC{R+&2pT z6o-AziOP$yf1y84;3_;8owQd!k8fbYdu2^?o~a*e2KtYy7pBY9B<|4OJPN!>b)eUe_QzJfJOSQD91+*JM;At~AA%^!>dV7GP( zeM98kSfU%}x5Sb%fHI+vbcNaexN>!wVZZD$fLr^2)`xIutEy(qK z6t}TXY^0-^O*a=cck3JnesiZQo6FbE0{r#{cZNl>GovlV=snpICvMA95Xpw8`nycO zD3c3J0m??&fRfHK5{|enU=G7GBbq>~g^Us?R|JSp+d{hV=$9@R$DBv03b>K$JUKP4 z%1A(e3@+#RAM#aOU?m}B$E1_U^@n7w9Q=F}tbCqGl|qaI>~Dubal)BVj6c6bo_0k5 z%_&;H<4bI%Yn?VaheTg5CUMItTC_HA0tbGPp%~vyA@0rreS>We%4=!RZ3Ja4lV1F4rmkw}(z>hx+1w zUz=5+mYcL!M&^}e{I>pv&52y+_KqkRdxbA_dZoFd^1# 
zBTGilMsYc;5?mWp%1?|1Ok-`%qG|9oNxI#f1zslmy3*QTq*kRB)<9-eQWnc%lTqG& zKGBLF!dpv|0GEEgV^wDoa{B&M8)m$J%6gZ_6uy62Eg- zE|AO}?}O8XSa!Kk3a4q3q>$K)8=rP9l;%uA^x@RTQ9$xFe@Z9ryaTf#Nt||s9QE^D zn-BhY%I5ZzZ@;}Aw{O&&){)pR`HJzd1A$?bmt5#JN1``V%xNB`!FqqB-iY^qj3ST3 z{WR_b$ZQZFpnm=9H2t!CcXunjGPaHzY;AZIA!wxDX|k7X3$Fr9njdr%c>f7BG$w?E zN!5k^YA*Ut(5>G~SqkRtPJyL<;R9`ty^+QYvXH4JXMT|Qmx7Sx_TqFHepbrJf)`hp zm44rl_|~yNVt>KaahgLs!;A@k(~tK+%)eO*Q$->r@ae`4gGl9WSS6%iMvLKP=IXb* zbMqFGmh)O}hn-kegzxfO#TbJgMv6H`axwzvC)L2SQj~O>JHfUK%_@Wo90IPsA80u7 z<lR3iwvj9jz)|>JH1AKXr=lx5N=&5 ziL@k~zGof3rua568vO@uve2gcT!@2A9(7v34GgG?!K#h&lu50)PO94q*{9djXj6UY z^*HXKn5;pTv%r`T^>2!ji@2L3QQKn#+6=mlR{Ko7*WazCWCh$czP#k}#brfLcnb%O zxo+C@v%{`3BN4>of>bMiMyG0wEFE`>RUtY1m^x+?qHJd)gfhT>;?&gaXrA0C>ByRc zaHJSe{z~B+nmRY+TGe9Hx|C(U4`YNlAX^ZDqayP}Q=<@*JizIUi%ri27(WYuuQ6MS$=N&A7rrN7vJt z%Plfn$D9N`>KI>C;yTG-ieJ|p33JMMj2s|~3e;~Xe^^6-kF7F{h}z!HMS?f$qt0Dm z8BW){jcr&ww4AVi_Q?Fqb9=JoB7m!!pS?g%9f^40@p+F3wWyFNx`HQXpS{Ca2nCwy`OCDgM|h;j@Wy zS|+ZN2QDmr)Ow_u$eekpY?a;<1kITTMoD-n)=6Qc>x$n`ZfwW&$r59jl3rA`Ral7t zhb*5GN2<#Yk67s8dGWE}(vc|>r@i-_b!B_s3Qp0DBALCg6yhU*7nRK@K0)TFD^}XJ z1tg%%vQZg-ixXZRrA0q}U;&ghCT^Mq^%$^xj2Yma7FvnUf@OA1lfH0*v394wP0pJg z>IYOg(hy)^)y1+8fJ8Qu{wd0A$qh55kI$8m{UbrlN<~6Z>=Sro)xre)@_$HZw5UHTX#fn%#g8v27SMzs)9rx35#$N`p6ia{O%#a zZXl1^Cc&IwcAqwT^r+e1+4uqe!BbvEYUZTlU6{Y=WjwULW!mDpXM@0RZoLPPY>Rrb z693?hLU^>w>mw+m9RM)$=aV%UFR_Ud96A*^bP6ZU%2J`}>6OuH_VutRhx&mNzO%|H zV7GUF!PSkJr*8hvZrpR4GRCf+*xZ-=;*fV8a;eBlQMmQ=PCV`W2}?oChGL1PwNZEO zn+FU$*3Sc%Ch(pBuESLT#YinZl9BuF$;auGsLeH9Ha$XR%Otcz7mrv6jo? z?5Lt@W|CwfSY!56D7e&qSNy8ju=G#!M5tt8);n3|>@J~MDvnfiU(hqJLcRGA^-O4g z@a0$i1>u^!iMREQH@Dv6>vc_CD-Y|2>VAlGHd7Ue!y&;pLnqj!!YQ|4G6TJ92opsM zA2r%GCeEX_qZ8X3q`vHkJ-lAvv<@>7h{kcbe$gV@C;)!|KXHkVPVqT%c(UG(y%S)4 zawU%(QnTKccA6jYMcJeyQiwxn?in+G?Qr3BN)4}A(ypgPR#FCBStK>_rzx5?tD=L4 zR!_b+=jcN#G5W;MyO18p4K^5S(gQ#s9kq^$!FecmS6EqI2^j!ttgGqI4a@735!X-k zGZp&?i7thF6^-B6{NBea%kl}OAZpjRBh z;^6f@7QtgY&XAAV$(`_C*&E-YyE=<0OZx4zXWAnRo(ZeOdi#y&G;NfGPj-jY{EkSm z81m#m&uy1|%{a!`n&)F;tRrM4rkw}5gL(&bKbiUX*8s)hy5HM{l?RxQzD+89YP6a5 zODxjGwB}As7Nr6*swQU-!>1pA%1uj5icDz?-2f+sewmjJ$|klse){1YeJ3SBUi=5j zP2fi1rxHx!;&In4YeC-{%kZDqa2%PiPs4`j{JMXU8L7$0u1l3m^;zTwq^JikzS@>{xjV^Ysv8=cO(UIq3M>dRk4wgueNj-|6AJ!~H_Sr+`3>`MJx0 z1=uy_m0@^^L&z6GwSKdINkL(Ee+O{fE)b_qmE7@8hW*{u1Xp+o|p+6!D zUn|OVK4!z1Qfvh*<1jhK^4yb%Z4zGr#2Xpou$J{ELK!Nhzk8?A!Yp%wvCc>$@6iFrZ8={kl65QyhM0*yEb?CGRkX)ss!Fj|MM=I0*!aGD zPom1anCClh8!BFZJ63~wxx?Pbz3@|tzKAzDNnje@tDaG&qx&JHKW zUrsK2TnX1sCCbCJ$Pm=Yd>{DXO(D**y(HNlV2u5tN@a3*LlaN=UXSdDxGc@wvW)XV z6Cm{+w#at)F@6VT>v5?AZk0x)`tPlBW^*G`+j$mJ)Apr*&mb&XIB`37CWEyBR7#Q< zUFR{8?M$>iD=#7I*_WfFAFulrxfhd(WQ33kjKO8r6%u$i;d&T}R$P-Vi}{n8zRn4Y z;SbJ%BTp6lL<^x`uz1!pZG?wv-y-}~bW4Tuy+*RF1HSzZ7mc+baGAnyUb(AG5d7|b z`t=4<9MOz_w9_9Aof_&cTnck~&MEY@~wz3je#mpG-|qi7b16seI;$urM{nuuPSMO?`ab;XZaGEOMG%xVtam2Kbu5i{DlTldOXe<=$ z(*9%(U;q=*CG3)aK_|?M9CeDOp5=wB{w3#&?ljhyKBG%{2J6fU%2?p%#`S_nb~%G| zT^ve(Rp!$osmgqewjnIxuj*d>4&cMb7BgvlfCFS}iHC2dz=6%F`y3y2hbs5r@64yj zpwUag=+aoDh!C2QM_#g4`jOuiv~&?uw;@l#Fm3o%@1#;>GPJ=oosrOeaLCA(?Ou!^ zl8W1u$F%d4X15*JK7e3i%%nBQ%xmnNB%G>$Y?b&;g5z;V%+~~FrWdsx)XK>!hJ)XemV1D6h?nae9d<06RYPit&xe;)nzxoy`B$fxB zve8Lf@-pvJSejw)fZMF^>2VPtX-p)ZUb>2A4B@R2OKPp;Zm!!HCBnI=9l4UDFpB1X zsi?^Led$+Q{Skj*8PUYp;|x3y(^O~S0L~Tgw`a2^e}TVxh=1@5uSYG>&s1>0B!Hy3 zZH$H`QI1mHuV^lwbY8Jg$1(1AureC%;umRnxLii;$Tjz>?nO3 zn*ed!j`&xQtD}X`BJI)QTS}(7nT;|1(Tj#q;nw0k(q)OIJUl%I%7Tvl;ZX9qFI(_w<0}p=)o(Qu%nMl;B{KX zejWLJ>%TfRTF5L~Zh$Br{T*Limu1Nq_eR5)vz1p@yVhro_j7(4e{wa1SRDVofV7Ug zk;IJgY==D1K33Ny)F*gjBP4l$m?|86dR0P+=SmV_8rvy$-nEOV2zjVKNvtI+K0Smf 
zsk;hudEWc6X%4Ah3NY0*>n|#`m$@}5%E;~pvq?+YY`5|7nr+zRNa}=Sk1$%Zf9R?XSPUL2r3e}G%Z0OmtnSmidOu%F3wsy zWo(w#V*$nPw`^vyy0Gd80}++}yS5_BhPlEVo>u%gm)Kr!$A4FdmzY`!{taF?PFCI4 zv1kQtV?i=%Z#N_IgWOSn#0s?fY@+aKNKSjm?JKklg_WXCR?$hsC}i=;+LT2|adD^1`AQ9|n)QePHT%+RaX) z8^_EQ{SMs8(k?|kUQQl1iP+gt)&VEW>4+D8@nH*de>HM;9)Ie8Lf^`4Q(TU3YsD~C zXQqM^JC+`XAmz$f!Mc1USV}!8xlY@rcmpNVFiNu*xakQ20w1YUc}o!HNxJI)wpT01 za;5kH;h-Lw5X7?sZ*lWE!2tovuwtY_Pg3Lfa!OcvPd59@3afLdU&F=LgG`F z4XEktal_DLEMxd9S1-cSOpAlaMa)nRH!6n1{L@`m4%V77V4bcsrYB%u6L; zPn9frP=UD|t?H#U##~vG(d(5FYeL z@hnIwoW#tQ-HNGz=oCW!DkENt~@lA!5mI;`(JpdJ4lv z<`gBu=q7I%H&)RW^(JR(*GGqiLEa8fc1glHGRC!mY$t zUik?qN(ov!-R2hys^Y%gh+ctrY(s$*Y*j2(P6jUL@<>(T^A;1uLagEzbg2d>4d)0k zpc+P)TVHM0KNmE9rtRBqaDpOs6(ymUs_(#NkcDr5kLtgmgT=boK1xv2`k_k`^ME59 zQn!Ua;t47yXCs=rv~Z}?a{$w}Iu3`ZrI(#`MpNY$mk69$k_<|W_5y$VJZ4DU9+2OU z*|F9IebyALy3i+|$oitQNq;m(?75rDn|@vC98}A|20kU#_xwdr?fcxU1*BQ}tCuSt zIh*T$qoTD;y@T7$9$tK?ZMRSQi@iFAS){-vJ8L0_JQ0;U$LRxh&OM+H=7x{3V%oD- z=Ib?#ICn?gPpv_PM>)A>@iEb>y=27t-w%H$%Wi)sobnb2pIfnZKAaymsI*lgoT7_) zF)r7wcX$B#QDQGu^3gs!#0&8Om z$u*5wo>jEMd)SviMk5!HInzQP{M_BA-nZJcK;l;PrqWr~(~C;UIE{e98boV&c&txj z9+U*3X&pNis@}pK#lo^Cmus1rF{CPCH7VqKVp(Y;`vwk*?LfPts|kg`pDwdZ<& z{D8K3!LJ*mw5bl7JHP74o>d^efm^wyb_wx?Nvw?^U`!Z5qgLIqXFiuzX?+had};^S zf&TcAa5Y#Y2S382cK=xVg^%laXgzAq)=yoB`q1ue0_dueOR?^|B4+D2hS5EK z=7-wQt@qepRrJ98)$ufs14K*3FSCVz=t#(u&m!rZSA5Q+=a`s~zlrMOcfG<1xCP%Q zCkE>Ex|5Ip6$}7J``pn$KGQ60%u)+7)J9m#%xq0mMAYdF{GJYM>mUqCm7Y|YUtsAmx(L8n)JtC3>OD)6y4hJrV zScw!lpFAMl6<`n+ zC=PYR75T8&Ie+?>CZv-XX%V%58Fr(71#~MlhtB%?1?`$cw#Wq(%360$D$YSB`0C4%$Q>y1*bYV^J zji2*~+ZymwEJNdlYbd&4b)IL65imX(G^oA*0I=5LTbYjNh4rhBA;x@vAYd%Bw88l) zRU+zp=5>F?HB@w7)!z`Uy_4S)B7aE43{@agknCl=sI$C;|I!JABTaVL#ekKCh)JOS zdajY#6KY9c_x-s>s^)jyGc6ImZJ96r@=mE6lnWnAOT^4q!7i7Y6*`9)a`A2mOS6g(VgVdSbV z7MK9DE3ySDx-T#7r9JV@xeD+40enmaHTNu&YyJis%(uZq<}#GGRrLq&{fh4g&jm;5 zUk8rRF?x4Vj>z?Y6LT3mrIn66wFiRnA!Iu2Sc6j;FKu8)R`uUGJ}`{9uSJ%-^V5** z!Bs|CmuRcQ-1Ob8MX*gz@pPg_nxHb=ll3#D*~jSS`p_Ic zV3Hb~@gRkcxf2%0&U`5FkURc9?I}HxE2`trF+aZ46K39j5#$HqU8Noh3}r9#t8F&y z^Qly68Kt9I^4oaU%Y}awZJa_0UaN3$f!f4_4`q6-_su->JiXt0c(@P%#Ck9{M@(uf zgnw`$d@6eUS^0GCU99)SeTXBh_i}0l15)VTfQ-zk<0!62vJA@E| zsb?LMcAIE_2046FcxI|VgT2ZzL7Rw3n8fR=;@Tda#GG%G&FBotfv9!_7$Zm}fK@ZB zn=QeSa0#XHbQ&3+h5MV2!au9l<4Y+bGU%gq{wR!l{)KFH8rqJKP z4a0BCrj5y3JzFtKeMyJ-mctj_cr44z#495Q4lM_Nwt8nFCAYsf`JP;oHK%u;=2N@< zBWmQcWpjQrf~8e`(P#r_bcFm7bvC;A$SW)(nM56n$|Cx|%A#|ub%t3dQDX*?Vb4QA z(SJt=;Ft@R;Yp~<6d%N7NUQ5iu0qFRx5*M=-8otPp_itw>o`==c_w~8JJM?g= zwSb|2nwR_{VzffsQ7R($DA2axNwJBJ;LBrN_uU-DrkkDoIz!)+2VA*4%$)HEJP zkets2sMt#3-M4{hll%=|A|UhRg(En7fJ%u`NxTYg&c7iRyXhzrM)pkp9AW#^F?J8`+||6%VpRLE5OAY}V+Wv2pyP+g&tTQB$O z51y`A@@8$fV_R<@VPGat=J=q4K|{{YJ?Ml__=&DE?pjL1fLt{QzSXS68WksVU`*G4 znKoiZC1V^IzS6#AAV@(O&smYmP=@q969U1WQWML?QO9|q9xttyAw#q}vU6ITftfL< zy$XQm!-w}%gY65Sa%Hl{fW_4@iGZnG6PlhrnA-ZLUN^tgNGk=A7gLhj$1#37TP_zs zb5gdHPOQ|9X-q%BCxe7J@wST6rSe~Yhzs8HH!dbN%=j7cg81-il+&j}Vl$t}CGSIz zBcgR#Srg%#hqCU_?_pS*4c> z?Qd1-m)TDw5Rg+_qvdau?~)oNSDZCcFxMBt8$>!BY<+y!{1Oh2n@1h-FkBgC2cKzh@0 zWnY?<(=(Lv}=-5ck4Gla>H0e^UBq%JQu_=<1>{7XWAmUvV^t}z#< zV&>EVETecz@O1LY)&yD@8)s*~WHzqroe1IFOMZOL-|ymSKga+8P)e7bv6}w0TvUHuzFV3@v5+9x*v156h)4)*4CIG zkN(?AN@!!|y4vqHiEGzBasARU!L0M?IGKbN5iGmi?UtLRxm}z^RjVbtlM~C0|!_}FxrfmHD z*vmY$#@-H6Q^p3)U^_&29SfgqG?)b z;@8=If-k7KJ(UQ5FdG1Z2R-rtlk14DyBLADKC)!bhldZcDjMO!lp-a5C=nEcsRQi5s1 zs99`c%`vT{Axd_)aFPNab=0_doJB0(HET6xKJYJJ#;nB}!zYaRepbptQp2#{{xfF1 z^Pb?OqadY~45cvZbv~inbCosf>*Imo3e={GiDwyq`YOF6s`Rg^8{^!^8cm+*s|VmJ z)ws_7y;J*()_!U}n0bE5;u%!fCK8Gpz9tgF@(LrDK7@y>CE-7Nib^!q{uEERNuyj~ 
z{N^TyV~b`wNn|*Q6`lOHnkI(!7NYW?&tAA4aqJ502g>sn;kDhgjL40BvRVKslu-tp4 z?yu3{N;gz(e$%P_01Ol|M-$2L8hwu^SAHWU`b`ZD4dY$tY_^0NNnp~_o!Q0O;<3D& z+ONV(E2qnkS8DcL836AKgg1|nZQyq>?VKZj%&US9SDV@M0(!pby(Spg#5Lo!ynp!# zbC_#P@D1asiKeSVM9MZ)QB+8PbA5**u4@iuxuWz=x9HB*6wQ2%w6#^!RIC zqZoYuMf<-hAdY!O_;;GA*h;Ux9LMUBOVt+<_Y0#BNprf+pi4)O{jzMsG9rpl`SuHC zZH$)~3B0-5g-BuHG`}t4;OKRcO^-1Fi%W22*s`hl1#;KHGMfOQIr}Wa0D+``R=9A{ z(^wX-qy^$efkLoSfLC{B*b9Yv+kuIVcdQ8upiE~Tl# zzRC03l_l|ZU^%jamf7mSpT?s&uoBSB#i`>6122gk$(X-%81CD6K6|jZ=h*SJZuv1! zKSRlb!#lin%wlkV0xXDkcT&^~lWf*%W?D`OC?Ml6kjXQmcx@h(Cewz0Kvi>`F8lL7 zO4+)jNZ*Y(+*}G?FEEBlKZ+;%1Oy9IJgsZW@WnpuOq1Q8xo>=5AG}zcEVo^+tac)O zXB1UBpS~7Bv&0~oqt$&_UCo0CTjC@fKaQ)ek%F|%SHj{Bcmbj#m1t5jxL2-UAuYG( z+oDhQ=Vub#>Fq+?@(;g%2Jh`kKDa*P2k2kq6ChMfkY09exI0F8s->ctbzodoaHx~^ zE-yVqOAYN)8HbTxpb(g3{j+M=VeeDDUmVTN79|fkUA8xTJYNj+|wk0DzVnFaEMJFjdv&K z06O7$r4Bn24Q76Ss2z&vWg*`L95h3gue#4G&oD;|{r4^5N$qa1nC~e2@W$OrDe=HM zT^L;tR^!jz-Rfo2k&UFPcExi2is%qPPAj>E2R@bq#g?3_XzXS|_Eq8Jf~|{k%PJzB zr(T%NpmW#C%wsH_y>Ln%KFduGtUT#rFBq9@&(n{kX-%+yc;32y^{1FGO^$r~z0%!| zcB^ZEMyVdrJ*xK!241&xzG>Hs=;UX%QsVPUbKZ>oTNtX~lmKLEIiZc5C`QF{cj?|= zt8!)BNeRcvh|`LC){=tR(WVU_SJEB{`*S!RO?ujQkG*Yu*^Pfx?SvIgT{X5Fnu2ly zCTKr7^sg{Le9dJpXd#yybL6^W!&psUV4s+Kx+AD{*45I`bN&iZ`o*#g0{R_)l&i-{jczBhbHtB}9}Ftz^&;zj zP_G#wHX{sKZ!_Q1sTXcfFnmzXi=|>sx<+wwa%C-w&~h27BP?~$NaY!{g~+sNdfnNi zSIcLY3!Bsxioqq=P0AQ?j-IEw~(d$gIJ9I2b<1|SL*cxHyi zS7?`iGu1tQ%H1$c1e^F2Tc68NxZP}#;=WchG z_sRxS%5k)6Rb44|bUhj*0i7ribp!ROkqoPUJ9ujJxRQphm69;fdFg#C3&``~x$cB_ zhfA*LyiISB1wY|H#B8TJy7xxoh-;LAo0!Y1a;Lpe+Qh6Psd#0vqi>b1%?N3Gmi;6& z4`=VOWh${$g;~(kkF;Apc#0R&EoY}xySXS5vO|x&ZZ=kHT|=XO6g?{Ji*ho5pE})t z=jM}f(7|TsJpO!6oi^VzYoxA%*JoSp;UX6&c>cmAcfrC556y&dDOkKpv(|@s)K(4O zs&IwUE-TP;j^f8ZXo@P3^nDAwzQ_k~Gp`>9Wt$qDnUpO1ZqeXc)wOo`PS?t1CWX2& z3AFm?$g!GvXx?3EaL4hjv@Vs+(~F~jj($<6e=qbmfywA+mt2RX@IER)kwoY!*A!CG z{ou=|X)&2@%)*Bv__Z#XD%=$wm@SWVDul^dv?XN_FD`D`Ra+NubCnx+!t)WorTe8U z?$#?iRr+{Cr4&ps{>?fW_EISsNKSa>%EB#Uw4~SO5!9Sy+fMBn`{m}jQe60dWS~=j ztCgq|v~=fP78Oute1E+Kvr~u}spJhX{X!<=hr9A1W0*91Kfllv0@5~Nn$Q!?h31ica3#oH3cSF!M?|8 zg6oWpEq#2hUrU$be)qt|yJS3Z1B7WmNa>Yjz4%qxyQt@^a@OV&Fm7kFHg0s54?Pt; z4!PfPnl15crdJAUG=f%QU>%_j+{ z0G44hd{kRO5yldG^FU~D8fw9~E14+*4ztRcs(ATKr$`mHF<;yumBH)?m4fD^q4Kxf zo%SS29McMU-7Ye>m|^|e_8T4f>IFx0P46E1skLF=A#vQ`fcY?lfe?zY#wmkU;zt@F z_p4aXtM>j&K%E>BI3Ji!$*roW1OZOJN#o-)mQmgRRsAu$`WiiGjz zx0ze~vg)*P8oo;OiG!Wyads{J9@ z(4ID1haEp@_!0Q@Z}HmDPl32hqr$iB_HbiZMXrrf`Cj%ZYHWl@jZXumWSBdG%T}im zhGJWrwuK(n#62jmvNVJ{PciFYmh&CYh;;;5!Xao0Hydx<^xB^qm&OX|7z6^UapPzq zYq1ag zcH~(`6tC2b-$FkG*K*BRL6?ZR3uzgtCvQ~GiBGm)9 zh^hmBXqQ5o*6Pp3uh>ElPNzA+wYDJc1StoGkMTEo4ed6( zeJSj8v@S=bf{^w%8$Ch_Tauh?9$gH$xjJF$4bAAyRl^8Q zhKM#rjjq@oRX$Tx`|Dsih8EH7n>Q*B1W(Y%n zlDK9Dv3~8e_*r~yj?NRLrv2zZbJ4=82f5Q-e=Io%W>)%M*?Rt#6-hc39AE+@caH!~3aW zIZti>vQzVuU~<7YuZQSPaZLHG1wILXfroi{Dnc&S(wIq+>thL$EHf4rC!&7R+Z46& z9>+dHg9t-ban1IP3uy3blYGIsNnfOi1`1gNZ?KjoEfc1)IeCbU-n21yJf>Uy-`I&7 z7^UCKMSAjpVpCtEJ}>+)Z@%yAtHRsz09+{k$V&js357cwA_{|TXr3j$IHWay063Lr ziKIg}83zSs^|MB<&>#xL3j_MtKJfi^ZpLV!Jwvi3R*UqAch7(+7R~!4*qv6T^ewrn zYAcIGicgSZFp|q?D5@`$R(AtjksrPO1TsQ`S7)xr`{!_wuzEQ^Uw#t=>C$$6_ffC3 zg-70*YrV<9uRPfH>pBd2Wd~h?9{;~KkYDy(~pCw%*YW)c-6md{QQ`4V} z<=jyu+z5RCMj`GUAt_eBcL7HIQ*wWZ#hQAYWCb!EXLgsu!u_z)-2bwFf4f}H4W_i8 zQnx&ZdZ4jpr<-Y55lWkfB~6KbV97ej7YOFTrv-hY^$GOELNo59s{rI`HHng9%d_>> z$8eH&stk~!)z%6zyFPYU-!pAm51}HZm4Nmm-ep+A=spU48w-s6;H{$Olr!8r^0i5C zdg^%ixaqDDsUK8<8$_dj65WEH`v9o-G>OOni|e3z_KH)9)~7SuQL8?afzcf6#zYL! 
z?z)w~{ULhxyYD&D+v}R6^qyH!9hu?VM@zCkySLG+`)%I-0+j~ZFA?~B!&^7zF^CtZ zkuTw+_^M*j0@+LjwU{F|>IU#S)i|O8u#&iv78S&DYe3!WCE}%jm=!5Muo6Kw^6UNv zTAWZMi$0F{>WoEb5(%;*yjp0LCpQ-H*Vhn>mguqqHe4OfQuW^UI{BXUA6w_Kb1jT) z(FbBcPD3&ratGLk(5tIFGZiB^ITLb8_?*dR>}_w?(Gg9=dd zGeTsvJ!ms%Qjacw2q+hpMm}3u$*qgZ%w!#0S*-8V?<2gU?#Z{DndAg2wur3ouH)`` zS_?1rgXJ;2=FYHL93b|hi5MOF}J%fxdHRgW4x1uA;npA-Nm4P}X=tlBsc>!#Dg732-iArf1 zjy%6<6B%c;f93&BGPSG239`TYLdB5XjV(RlJgt;scuUzhe^D~i9=L>mUHigu8NDGl zN6~{dl#3onzU-S?ZEuJWR(79qPfPrF>=4ZC;v7uEAZt_5O+>>HG~K$SOzVC3z zbVFWadeoC@f3t{%1j3?el_3oX53%O;>gIuE9VGQZqWe-v+)6(9!za$8m~O;C7@hkm)B zYy2bn1sqL0_<_(sAlZ$V=Zf!^MKF^zNH2APZrp@8*756@H;SgGi07;dVk;-jOU)n9 z!ubn{D0~uj1MR*Yx$_JSN-`xvz*9BcO0sc-N={GA`NiBJZEtb~tb)KmSdeMwhk-Db zQ5I8Se-(IZg=e>&xutPPjq`O#?S<;W#0+`ICue;rk#Vlt(eYksK0J$+z0eLrR5EiHdu;)SZ1 z1f>J7WUmAv#o%vzn5C@79;4~7e69eq2h5(V!8~u3qWwMvM4~c{j?%r~=k3#vAgkYn z=b-~tEB91r2T`zwQYcvz78%S0nQcpx8VRrQAsL%3NoNPr+0=>uL5g56EK9Kme|9TF z6p~X!*WMN4!`W7T^<4MC2(3it3_=re@i-T)3c~r{HT8Oj5E<*&ShbOudsM&V_A?zz zVF$?v`c0MD3tIY8J~zP)oX3+6jd zSpOLbB(yhvS)}p}WenG=y*TWAbgN^-;ZUR;$;9Z29J|X>9?(+op5}* zPD?J7lOly#CbY%=A^@dvlHcR?%T~B_D*GhA47M`kCnCqLX*$O+=!B;-#D`ZH#c1OK zAsyKSSEPdkTBE%RMKHqrYyQ9y#<$_Qt?vpW(wY$HbjM-iOh2rd>gtj2e+W^FHnC&y z32URbGI-JP?_m15{r$WTk9Z=AG{14nuT6oj6^Pa6^dunrlk|tqn@R6ui4=|J+`5O6 z2c%M5ME#Sg#ffYkZrbBVN8gJ;Ww>QP>l;&shI&y?p*%ozK{kU}WSe)l%u3Y>6-w8K z!_G=eGm-IMKeMF=SeuQqe@26AtuluMA&ObG6W{Y8982a&@eOb<9=Nth$FCvaD?DgS z*l%%_$>Wk60G2TDE7_FF&pT)`D)4VZ{V>((Gp^lF?n{Jw0y$hFah`{Fz4v;Fxz{R= ziG{gCHBfs5RPdQZ@^RH6Cc}aa%T8&D&+ggxOo~$n!W+pji&(vDv9v`bRd zz%h5`Nt4Mp8-f?Le*-ie__a`fM&O+*^zA(#Z-&&2|GDi}d>yOFM!!^XUB?rp>(e6; zb@iLL(YF^5?0p?;bvhhD9>~ z^b(XNSXltEe;tXi!hXie5rn>}gPY|Wiwen%41MIdufPMM+l^^XR=DZ~5WY?qPq^dH;a)Z%p^E5cVXy$JM$CDoAQt zI@vvye?ciP923WmP4m2JFO>5;90R#zfm&oM(zDJ7gW0_>DRacjNZAVr0Z2?}n4&T; zZYLd`(C~AP4|2Zb#3<-A@Gp!Uc%Y+~bSxeO=v67k(F2KPwac&!vSNfM^WNCN1Ff!` z9zK}X5k8)hq}EGAdj{<=ICKzqSiLC`F`%}=e?y|GiMISFml0R$)5vr(14I?D0l?{( zh4POSmsR7_Ew4xZXqfjq{x^+7OsOiE-ZtOTX@y}`Vt=?ojX~@}`)Mg&9PHOn ztc084uEvs$=D+G>57Y3Qb)Q=#uPGxF5HoVhQvbk<<7*n}ZeCQb9%LDZ^aaa=u<#)W ze>IpOfKmh%zB^IW383#@%R zM8>Wggjj65!zCPoYKAyV9B~k2t;iI3CwOIWnp0VlZB0Y|3V<`e{DAEoOHGd0*7(ba z$*jN+bW@-GmNpQ`DLvH=96oGCf5%%CeEl#l5c{F9s_9fGvfN(D+X;ZJGk+|#RC;Yu z)?7tZ>0(mxVPx^lSf{xFMGz$TRb^X-*wpVERl8$iB8Q$LmrZ-S!f3|bHOe@UZEUJUYF z*<~?dwo55Z;x#eAqgj-pvnQ86o~hQ6F>~l_T5p=yMKpe~J=e565jiL>ChJ8V=JLvZ zmxWcPuVt(1g$skvkA(%h`J`18G$2N=an)6elt7oMI*a{s7`ko;eTGs$+?jriulegp z=$fijGkina+EMI#*Ya$te{fNgxupws4P%3|dF=?d8{Kt$FPz_Ce~%v6xL&!xuBi}8 zKHDaN-4qm#`;wxF;3|6aDzUmM17wFoSY*s!lzmaEQ<>|Mdb7;N8)7m%&*lPDNGv}o z#EYMjJ${^GGyaZ!FsNZ3>~R)7xgBtwZ@{er&k13shhq0;Nbf4Yf4SJkj4X!a$9NxN zTykm6^|x^(2pcr7KWb!LVGv3I#Ae}%oZGbg{P-s*I@I~3Z7&vdv_k?wL>Bkt1DT8` z%ZD}&ukhPdq;xmy-mnjJB5<~@wyZ9Aut&y17>czb9z(&Hv6ckkfrx%jMf1%2r~uJ) z>N%}M&lT-?Z?iQFf3X(U^UD&7jbv6C#TFL`3~c=t$ch*6(cgpeCeJwG10g1Z*4;M? zGZ)oT1L?BjXc2pL!FG#8mUH6j z>5q&#MXAsqe;VO*j=1k;we5Ji2I7%-)$!ZreU}j^RRwM$OaY5EDDw|9g&!DyQC#E+ z?fc{UI$3x{(Kz#s%E}}#Ri!qv1aQk5STeTOJwTpbxF>V{o?M*VM%G<}m-)UinNL?~ zUVrG0;y~cx1;-x($hwex1#*8Kx0kslKO>b*N_v~Of5!@^!O}LTAMme)vB>bWjp7Aq zesl-3D;FoI;Kznv8+i_beR%@s7?^Hfr|JMo5!_`ur+f4*Ll-CI#B_ac)5ow53;i)@ z7R%SZtb!)Zf!P>mbpHuxbxGNPm%hAQ^|{`lg@^rkfRpaQgChgt-3XAvJ$s+Z&&7$m zl*-j1e+ucDu9)@;>6t3b)-$Dv2EQO9`eRB&0>UK#B7aw;yj*@-u{WY|q5i1!LW>x! 
z`i?eT$E--iy`(PLoAQ91$$WJBO*<|ocb7-f95`wfR^z{*Sx1vz4kim;Ow;Dw?f<&oe?&4xA0{G9O ze~e_hA<lKH~8`@u0Et0Gcx!o0@pIZt1IXuK&mec@dXfL6~8^m~o3e>-*Q2G-JTDIxwXscEph7ia=1sKBoeA$p^v zXn#Ys(78CUnRHxx9r#sWjDt?>)(6m{h6nZmr-ESDJ0Jd12GannG+^t}wAS^iovwop z4FZc~$^3%(M$+zJ;*gPktD(Nrky#TTnVQWQeh+i78X!){$zQz>1&g`xo)&8jf4Vi6 z;ZHgu^QH?G1x{=c)UmWXo-w$^95I~ZWM6i^N9U7X*}A9sNq55=U@Q)ciUtU%Sr%C;p5rH*L5d({L)=Ok84coSF&f#SM}3|Tf6|pbGyts_ z_dA=6NqQVT6yBu9^`SD6nSNFk({h`tonW zJpAmcDlChAZ<-rV0PcD)#W|Uwm0p~sRP}|>9PMqCBZqoO(E&V^V|0(<-g7N;auorN zsH9@B^IymOhVhKQ1u@Vs0&t(4lBG<@OCxaqttOA`$Ne>`)9(BLf6&l&Dobe73|fj` zU-3X1&r=GI!X#Tl!>Snb+8_)oSZ__tX#uk+O9;l0eO&i3wQCwJI}tL6dA19ngfK~f zxHp9?sIbq1;wx&`!UO`22F@r+w~DAIyQ%;_7f!PVmFky}_@dkJ*{)2CMyY}Kw=S!8 zO=j@K=lfH#uXfn9e@G$Vj56Y>x0Gji9W5C;Tu`H)vd2JC_w66vTLOIoJ<%L0+8eW> z3}BhmxFpdR+-BUSoCeAb1LkfL|5rzeje|c=dctbF)&rSo z=r>_L%DVqKf+ogAjjN#|T4f}ZvB0;iWJZwoU|IIvdgoWI))mKl+AkS@Da?} z+#R^K(9bZ)?&q{s-YRyd#YwT1JdZlvm;1{(xAncjLQ+R)xR$E`Hadgi5yN;W;f!l+ z^1$K(QAIMCAKc4f3LUca59RTE6auHoEZ3V@sd`z&e*mw)`?y|xH%vDru#7>3U!IS= z``&bZL=S<}?m=QteO~HGU~x!?{#6d}QQD1*2UXXSAYyLDw3_*!Ywqk_iwMxg?#)@1 zm*FUw8vlmg1wXWMjC5Q9Mz+sbc}^d{4PoId=1CGcpLtI;EtMjGy}@gh?k%BpZBf1DQO&BGT+qmv+N)#gkD0q&=Cerz=^ z5Br6K_Aa$Ppq0Tq=h8rm8lm*llA+~S?d(#f@L?hg3-O%BIm`2H3NH_a>swzqOFpmT z+%UACdz)AN_6iO%AnAKv!EE8=>}RI0T_mGFFV)!eO?l-ldki8?>Txtt>rO6T-m#J! zeyHNztnpi6kyu>+9EoKoGEj!jS!dIf|;w&w|o_4qo zsU$!4scRvrLLolVvbg}%AZPp}oHQiNP9q|DJO;t33&`n;xeVxcl~~q}NFWgytVHI3 zfBnh88wN`?LitdS9b&6hWxQua0e&WJe^Iz^A}^p*5IR*DFiA-YQYNv0cn~_S#T*4L z*urIRpm-bu@MxdsUp1OIE|dju5{w}@^c3u_r5h!vCXbn_$SyFZb3CzO(@yMaF1_<} z*&m!ZsVRI*vWO>XWEj<)Q&jA|oU0f*KpoV*xjP~kqh7vW4sa~1A4J}`95|4Xe_xEn z6w6Os$as$_JWm9*2Z)-^RH}j&ePJxY)lhsc))%Z`5#&)I?3t`oje2%Y$%os1@Lff( z$Denr~D|qrq^FQ zYa|}vmWrEOa0V>~!tL0WK1leue?e^V%k_nzAZ`oFIX9@z{JhG~GMeIbrmfxW@n%zs zk!!C|P>Q9oiMQdyEQZjCa<7bMWd3o@ym7A8_N5r9jxM`_l!z>{@7622wW|zc7+H9MtFg6H45{En=6OD#v9*PC;WU=Kf3+c=naWVv&q?P8)8^YD0^kM>Ad{b%p5xq8(+G@DgP^>j z32j3e?u@R4(dq0!u{tw zT8@I!(>-hS4t>W)SQlD<%k?%2fxyDC4rJ?9a*ewV`%3H^ zcJVx$ibi(mIMXmvL%FTJm6l85RGffeRhBfA;4H_;-3)$ue~WrD$Z5W~_}ii6TN^-a zMrwU`G|4Q?(-2bUpz?+%YirK2m38V2KOi$4?^-a$23Kob><9Kt^D z$%+EN23Bx#f7WkVzj{0EL%9QE$Z9!SZ;4a6i{OUe;3xs*3^H0&^=r#x9mfJxD=@!k zR=Vv*QY)0Y5|Z7~pqWh#yZfZnu3OiGUq!zwz9;c6&l*MTB@-b#o`)G3g7;=Uu!ky` zfGcvTHg7aPUk%He{rxN?^W4i6P@n>6oryLEPU46zf2|^kK~V~IudV1f_23Yq!?jBg z?G1!UC}Zmld$tgM&jp&DKRZlHcN0&c1}HC#3==bE73U4eUF>ZS5d#T zt)s~A?Dqu~=6Zd$^L}eoqD-G^8W@XYN@PuWxJX@fZKp;mPKIoQLZg3I|{ z=**&H1Y87hB&1pn>WPsagg1jc2PwdLbv&B&e{>a}z531N=T9krk}yYpr@7|Fnb)E+ zbK>@fYU_*tPNZ6vGgmGg(H&{ir4BvUKz|X25W0FIsWDe3e9%o{X~b61M#yg_bRS2) z=`sxj|Hyu+8m~-foVGBn8os|O8Iz079p+6G)HdBjr@&**;fRO$T_W*@!Ot;=P#vOj ze>g(-OM+tn5!*`GZ?R=uNBbHGNZh0XxA(Qju?Jfm!qeaIFbK>eKngzqUD!IV5Q3^N z+>9cH9RxB+`zwS3)=*~!UG2+!y}99_V-hc@bWJvB3i&<>+Qk<_w#VC)^03CEM&2lM zG%^~e?~YO{?VU|vKS~sH1$D|B~Wy&(PVaemwD13 z`6dyZRWVJWAZ%9910tiB>vv?Tx8LPQq9%?SFQpq)3lep$2KH`oPur49TLbEf&8K^I?nuklRWAtN zVWrYsu7EOIz+Jw?@YQ{xF~cyz-xt6#3}5<|%|Ap_zuvE%!WD}iWL!J49-f8UiQa#q27q8cf(+g0J{#lAN7atXmN9(rXRG(|a~ zSq)UC?nREV6{cqFonu)i?tiS0eqf0`0|ibN2v7u0<1aqb;OfG^H)?+&bf_Y(mIRUf zUPIixyb2|7O;205E6j|L_qWeEs$d)wL9<;XT8!VUlEOUitFMaw0w|K>f8hGC#C&%l zq8!v}wqk46Vp|67i@X(Ig1g-xP`qXr^!Jj^uLa78u6o7BUG{<|A?bz;1wEU2yw|oFy%VbE%rfUjB;8 z+)V-l&j2iL2?lo#280)dAswN{{=+IA>S*jueAJDH>_ z67E*5|Kt2Ik7^K;gr6zA2e=(gV{nK#prnRNO7@P$7)6(-z8iq_9%^dhqvvs0>aw56Vla z)SUE#n_)_0>0-&LV~GT5OlaBz#U~&GtMbcjR`74G=9wP^u-y+sMx&00k2e*G9b$=H z<744(owsj5NT!d8^-g&dMv$61M(h2)vOm-KHhE^Ye?+zpuI9QOUPN_lo^_sq zSWxB-mu`_B_LZJvUY+5o*)D4qL&neYF}BbhM#7AtPwC^(&$2y6B-6;?|JSwyDPLzq 
zO60R)76d_%5g45Ujo+6oG#SxlBzrg_+>R2d5K~L{_>0-~XsqyMr;N$G{df2*Baz_q z0!GjRMZFX=e_146T76^nmC@Zq)EiMrKy`7w#`7j9bD$`McelT<9p-xys7BDYLDwov zF(Y+|xZQd=>%Fv>t=YGWWmj+F{{E$jSUV}9J9_w>znI&Px=ddV&V=8wvk1ZfGUm2< zD;qbPl2VV>=>+^f6;FmRxN?MkNR+Aahkh(8ho~uge;stKN*>0hn3VMKHU+qkhglz@ z&s{sDdWsx4H+)4Mn0M12xb>4lPYM~qG{|l^R(}$wIxdI194J&@=ysuNp)X}fIX%py zAqqOa3M1orl~G_IEkF;M#UfUHI3VMc>K$MVR6WUZ29qhoW zIk&ece=;q}v`o+r0Qw>5hga}e@MFQL0cRpzwf-Ylzk`-gSgVAybOc}&OhPnU*6LY) zW|>syojTTevyLFHtOJNcYa?6F-&#NG@uZ8LZpXSc+`3Y08PUIwQ)3T-8Zz~=V`dSg z?J~E74X_ ze{QyU@j5Ayk!YYbS4Bo z3WBM1Lqpi?QKK(WhgUQ^qr|wZ8-C3$5e;IA&LxRc3udE4e)5VQ*W7^Jy!)kve{HS8 zbg3V6l59H#%SaFx(-3o)8n?Eb-9fDKSq4vDUDnux3%dbuAsj`bxXQgI5 zM!eEdyjQO#_mdX8(|E3z;QM78Yw!q4V2K}-wM+5p&E^Og{gOtxu0Y92QO9NzF<&V- z$hABQF06Zr5YE`NQqBF-%w~DSe=hs@H6 z6B$*Nec3`zYNkg2WnL~%EjD1k@i-k0%rSHCi|j;rX*Ax$J;GUrWgB(FSZ>(}P6v`CtO zW=t>tVB?v(4V4Ld)yFzvcfg)J2ki=zNlf#@JJ;T}Ek|{)l_A&jStT4?&_~^AB*UKz z>kZdvSbuh**~iTyA}g^7f6Anl(+qe}?OP5JY6||7zB#%93dx$Kwv$pKw9c@VWOsf^ z07xLcjW?id&C+bfY@!4AyDMd+a`YYg?bRe;TXfzHgweqrBpAf54E(-&(S0St0*_vw z*Cc2t!KYJQcl>#t4KP(D)>XiT*6~{1^_gw13s@mlfG7FFPuBG7fAPoq8WUS8i(kcN z4MS`jQ;MBx_tlG)LWhI}rAfIm-8%?*SSVngwItwgy7%O+s~1bTC30%{9n|vE1uY!6 zGxhsD7e14??mk8=g2#ZMW{p#IGksY-P~t#nH+ov23C+hP_rWchCrUS7B)BrB`tb$` zdrK8XN;PWB;KK7Ze%~6Yt`R1~Glp3t4 z*u1%k5y9s`E&WZaAGX1qONJ;aSBqTORxjhHa3IEGwqO#IH}R{ODwrLJNhuh4d-+6y z5Wn2w9#=eMDmJ)`@gaQF78ce{Zg?HjH`!z^uacC?a)xE=aTDF639fqi zlZ2&ahn^n8f9;2WOusogbh&DzL3D~QIR84y@(_o zN^5Njho(#Jp;f46`Ai)4q)PF4Xh&)aW7SYmrO7n3=3IRDKG@$ z7xz}Wn4hu-g?a32Z|?{XhhVRgPI04x?=ph35{Os9l6tJB{H`1A+7K%8y7! zpUgRWe|`y+%mVT5cx0F{o!Ga{CgQnT=zL$`B+*wS)mr@d^J=C~b)n~%_1AK$Xc#Hv z`tQ89J&^hFunawE%RhLTb^@}cB29i!(YB#jOHs%LvCj<2ayG$caIC@5$#OH<#sSFc zC1HFFZIuT5urv^1X9u)&5CMHeLHL}NNASzkfAld(M<{ngHt2a?uqRBMb^rEPJ_*0; zTVLnor)YFecL}hY>iQQlORV|f6%lDI#x6w8==5oXh#pL(>{=E{$P-WVpMygmzYsM` zfp+~A$B1jg*P9()i-bJ7PFr)ghU$?dA|rxLKno920|;X*Rd9?{RH@RJ)-Q zNe+su318@%P)!DhR7Yv-=1zt6@-1oiW3vQX%fB?WBAW>41(yyKMmaWD>)*v$o61CD zHe{H1dOQM3Dvbb1y(ksRp}z>S*P8jte=ehAZucKmOR3;EaUrQb)gxY7M4g4gw8?{F zL+QlSyOq=Cn!7#izkKOtpkDd03v26q|JJq)ejwM|-;a?RY1UH+S!;yPB-*}p5I`B~ zxPa@{ULO2F;Gp7%$(bg=ti%47f3>m1*9q6Ov6R$_DF}O$vwG|qFUN4WAjcl_e^9)! 
z|M?{@_rwVAm^8j5_qFy*q~wl^l--Tid7?G;pYEPV3}hRtcNu~W_P#wO!yF_CQ z#$-&+>@s5{)5+>I-ud=+^L22Ke&N9h9ar>rXx?Tl<_h;bJgD&b_ z>F*&M{8ED&495FCS(hLa_hJT~KsZ+27Higg8i1db8p{&>L|~ zUo-@bBAc5BfGiZ*i{xd2;W*A!VnI=U@pCJ4-M3@Ywp?hI>`(bAL+|(X&_(w9a;}!i zn(~Wjifh}xm42$KhI-zv^P((ky0$|%&1tH-zhqeEC5936Z2K0$e+fbjt*VaWL}?nQ zX@*V~#!+(BG)&_p#&L0IdNn`duU^7^-hxdM4g&5hwu{r7Pmvu5w#*b16P$EA!tYju zbDRsKQa5FUE!YMm!Z;&hpYU}%=1h{8}h3);)#^k*5z(X1nYdC39#yO^@x878tINMQ z4;~?!9o=yYCgzPJh?sm)UZ!NZ@^jK9}*`BjL0`Ue-}RF0HQ0~;1xvo{LlC} zXy4N3$V_yXowt}oI7f?{wlsvQq6f4JIT((e5Sgsb=~cplKHp!$Nb)3m3c^bjNEhvw zsvK*Z?g>L)Hjj>`EJbqks)fbcJN_gFC@F;OdOEpvE$Kczu`ZA~5lf4}+tEluL89V0 z>kl)-xoJRhe;7yu1AJdIUa<>M)S?i9v7e5Wcs2gK%LyU<)Dr7g!}*2M*j$b0p3ecl zYyD*VbM|CGS@p*Ew4s4NL6mN7R7iU0Bc4pBw!4tV^F=8}!Z0K=_lRinftK^$)Pt@> z#;Aw(oddC`{ScA8fg91bq62@#c?Dj)SB3wz1r<#`f1PyJTZMhLKyNr8ltP3!YH&QH zB!ogXx!;wk1Hoe%sFavgZQVMsh*zmG79Tz)>CIDuu4vCN9=v;YS-0$8+YUVf3kF7T zx+KqMLf)VoDTjsePFQ0T6!AX*O~qVfGsrpIaoM)-Hys)Uh~k3Skv8D7UGjlvK|ds1 z45IbFf4$2fk=n?u?XO)rb36GGTw^PKp4AYs#{C1-tOs7{4p6@#Y=-)kBMyY!lrYtN z*QP`W>FOB5`XAcftvPuf>Jt1bQ+?H4=_(9yo{*X9i>;F_+cLf%UO(7e9%w~4I=DO3L=oIX?>3#Fz zrQuk1??$g4m-sU>!2^lCukfHB|4ifB4VTi$%P~9M`8u25H$l65kgRTfxqN54amMea zZ|N~|&D>dCR;mBysg_6MH46{5pkZ@(@&(U>412^&&9VK1ChHU0zVCwRV>ddFKiAJa ze*`Z9jYd--=70Q;|Mq|Fnvdh=^k4srX?DB+^56dZ=f~l{Za1gR|GjHoPovXm_y0U^ z+Lu4>f4?#SvmVGdNM~d5CFxfF+5UJ2z5oz_41nn9g{H-Yrs8}!%CpdXF?wmw@Js2m4nmxiw;**9o5CJ$+ zZ=Rk4?D+>h^J7ekNqmv}{(tY46Ys^Db@uuS51IVJtK5k%`F)Iq$L;eoIT07WVx>~Z z!wee>7MVX!l-O z?}G|X^DKY!_iMgD7jawbJu-#*zL&G`kuGlcn8sPL@9*!rE_AQ%dA~QsCB766 zKZ_&EatNiJY>GIta-e+3%S>G6wEPe^ZUVAo>JqU}?|+ z;Qc;dj5pbSx`7M-h&$==vskb)U$Kt9@+v-Zb@__*d(yA*o~2^U?R!*(J#Q0H=2?tIg-q_TL@t@Nk&9T_&(2_pn#6l~ zY)W^9v~w3pNkOUrf5!Tq5!bLP$B`bVQDO*4xuspP^UH$z3r)$n-h>$nXNmo^Y}Z>= z`3?+ae%ic9$tgXHPjPMt(j3oBO&~Q-;;~(F5U)Z@1c^eRIbuw{hnI(fnU>qyk!0=1 zETN2c`Hpq@4rD;F@&ABnQ{RJeGHDjLntsxK^C=N$v@0r5f87Kb=P|^^hycZ~YDmdt zlr9nLXhFHSe>x!*sa$g^amgmsWfB295{%Xf7w&(ZpzPLk~ zI5S-UGPk`bCiId|c>jB0$348-we8m%+1H7SuzQ)w?AQDCeu1C)Yfy?KKXbdc3x@1I z=NECY|4TWLe@)}ytS0_Bmg&9_C@pr$IQM3HyPZkSKnsGT2nhI3BdIAeIs)w%n^%hB z%a}+lJgALgNs&@xVurXvNMz9y3S(nDaV}nO@Bfv^1?#xNT1A1(qyBj&M4~$7vrI4J z`)*!9js43s21)0s<{a!kN;e$MTZX`Jqy+(0y}YCPe+8sKaUvC~(L|OqrRS$I|8hk( znFg>g9v9>KVTSU1kd9)gc#u@&aR->5pQ=8*7;m^F`dfuQ1kR5iyLhz6-AP>v(8^OR>^vq58T2d3BuWkE3$AxIH8nQkR< zxj%-2m?rTPjh2%|p)T?b%pnq;yN~NS&U27>f6pdX??%rqfwRj3ps^?4rZZh5sHhk0 zRgIy3OpSQL@&GGJ+d!WrtT>vd4^d*K!QxV2EX9mw_ji~)#gSInpR`|o$rC?QU_Zqk zRxOMnoB*l;eTX>#J%^q3_x&0=-2C`0Vb|T-m@d4n{vFq3W#by@+W$DNmG0O4B5hpj ze@%MzHT6I__)$X+u(=RTk*He6u-FGxaiqikP2zoVjah|9QLzN&q(l~r6LLN`vPi-D zdj0u)G+!N-dHM#_eK{_-IplLNIYIDQkJh-x1`v?l2x)q>e#A52wPuPSv+=Sx3mXr+ zJh{eu51spXXJe+cK0PLeBB=!GmX4o3f4rfN69UEu`Tl$~4T4Vxp)#yTZTXl#JZNM; z%oQ{uA;sH}3XrD>a|t%?5;x^Kf70!e z=$00e`pD3X=xKI)4$|J5nduD6`hIQ%rznq*&apDy5_dcyK`x`RKuG5iD}l?ZUSa8G z6mky^0;NtH(FDYygimHD=@mF+Kl;#73UvWYg4cG`v-=S5|ez zJkv8sTZDBTBSYVb8QqO_u|cE9f5l_c_=mSVCZoqCMw*XK`HX1#Zr3~en%+Uo;YGsQ zt~WN^uJ`08|LphMp6}P7kJ+l~f3MxrtMWZ)_mOBxCG>?(J#b->2;D3B`M{WV%oQ-+mEb zT`(J5VE5;MWFBHLXa!M?H5JILripAqY;*{Zj?MdDM|TanK)3-ULD)P#l}jS&kjLNR z>n#udiM@xQ>f@r%;2IkTe}8=+@AE!B277f3j^O61zV|wD2s<+gtJ5V;+cikKT_AT! zfg{jTva)>-lXgi#TG^C@`jCjSc~=kKv)VTswmhWA@%UkW2F;uxK{a3IcWEE28@Z}>g$fBlgti-dOhBa{j? 
z$eZmE72du{FanG!_knh?njs!TDSeRBGfx`pyGgI|k|KrFCEPSInCH9k?opAm+h32c z?xCRedhYG@`1-eB=+ElU8tcMm!}VQbU$Nks8tRiFSGndSg~y0E_pdv;1^32c@k(Q; zM6pN{m^tSy*3v`Je>DF5PYjx6&&d->GWSmaoPtqn|4h+GDmSUO-KHI%&-UUyO{PyX zpbQcj&7>eFBQgd8uU7Tz(&_LrleWCU0n%%3{Vx6({n35^Z#5`w?Y-^ooqnJ5UuVc( z^WQmfW#hq*z2-jZuCY7oWBC`{)~Jsc{onTezUixme|`4H%_F%?jVviivw|VX z`FtVAVOBDE%u`&cjgRGq>0*-Xi-IbcM^!^W=h#PX8&zWA?fDTwL%;~iP+LMVt-Q-2 zMqOUcWs}Utjsi^)lx#b@BuftqH|mtj&+?g_IfKd>b0yX43?4{|hxnzu#bu0`cAr7l z6alq)#nQvJe~tz#KSW~_9smmnjI}>iGXVVedA;quD&y>0>vcsUI<>#n4J*;9S1QE{ z3U^lA>lsd_A6RP&yT_-)Lye&nDLQloNX12BdvhIob`b*3VK2|o=dJEB-Wo`(Kf9P}@O#|GDqAbre|^2>)8Bb^pGOb=z!8`H!mj%( zE_}gcpO?R~?3#Pny&QbZ5GRjq&QrcXM$G5co9}nx*{jyG5TuVgia}EkG*6{Ud?_`< zj7|Nn4riIkSRF5RV`mg0F$>OQ$&-4;LFsUB4$DbVb0vK+IeqUQQhd=El}&*Vk8_#h z59~6!e_GF%uG|x(zUGP1wk{99M(;3wr=)(qhhe>K9|jJe)I|U@(3%n3~Bxq8OlSIfAVua#lRUM{3A{vr{<|x zsE)?_>7S%?j4wqIVYL#{gaXrxXQmuOux46Je`Q`%V%O%NF9c0fX1NFKLpT(omJG?0TW+5Z%&p@IfUKIB+5tsf< z?uGSTeZFG-S|2B5{yTr4#Ft{Q<-zYQ_@zJ;fnw+(()R~S&bflU46V%|G)+C^P~x~s zf8vMjhqWb>Q;Qox(csOb#R!@i!rLv&L;+)uhpL$QvJPmXOJ7dkOIPbqYD&f3>ZZiDNJ_6we@tw&TO&tgJB^Q_mCH4WZb? zsayx39%q{7X#ae7$MT0ziG-$tr^YR@)MDOA4r;2Cef{C5&V6#x9xZb&_h4-w1)5Kn z3wBvn^1@6hVl8Lwxt(I5H9NzHC)FX)bdIX8yUG3(^RnIeXe}J!X zfWupB5kwE58qW!T^EE$*mqPVxHPqbaVU5)IpEhB0qTUbOu(=+Wk~TJE-*tN;uYShG zgP+-)SVAe$a>YyG3rt^8dI%NKTQ8X&W2z}F6<4YIF~{*f-ao~0Qf$|qwf52GCuedDBI;+ zz0nZoUz`+Ayml);b$fg=v+-XPsFe`gQW-~TL3HthPyUq;=H!|e+{ZVc*t8qowS9i? zon`(zUxX~LJ(O!CB}JGkubSVTE19zj7y?OoDL32z2KqRfYZ6-gmPk0-fBo_7lriAf zdT`hSHsG`#?B(F2ha7r{)0g~yuK6D6qN=)P?{zXJrF;&UD!rOgvd;rlatyLPVND5W znjvIR{LkwdJ+?89J0B&9gxUfX@r+D8hJf_lE)r#Yb>1k1;}e@SyEk4| zmylNMiqB4h%=!-bx`5h4h8}A^^^uA0?!O*2Dmg(w^_`FT=o74uoRPf0Z-rzSRKQbY zx5UrKGS{r{C?W-b^fa_KUqxWg$1D?TEbaN=)y=nweUE|tkegV4?OpSf?^xEa$*)|Ob$`Eeq2g~G@f+JJ zF6?)1a9x)xw%=eWxEf;H25W~SL<8ngTHl41H(6Y9@Q<00qmVONi7=H&vOgAqndB(ysci0;m& zontLN<4RPKX+?~whkq)HRvGdWEYBb z+4|{bwZ4c?bze(_VFjjPIB8p6ZPHveW!)A6QhuzDuxxB?K6xYR!W}TKi(3;~@@(jV zi(+UE2m{mpyzVWpX5-C(+wzOL*u9~p&8Ni0t|uMhi_Mq!zkljM8;f7LDSqNhr!tEd z5B$k|M2~DupqQsfqK#J|=pbMqkwKauXdPeI^Mcbk)??2@`!|LpK~=FtYrSQqJd4TV z2u6Iuxgd}!Bn;&sb2KW}KGHI@wIiI}6zbBSU0M%G8Pn87%kdUw51SPXxT~!faAm>| zz_z`~@^J6>DSxF-*ts{{^%xAe>Xn=LuB(3H>QCJ}i4Apvs;GFCSE%ZJyT=kMG3d|P zBrb)0{FQh8`TN1l8xhiTU7m|Njw}9$nJ$(3&09+cfgV?hykCwh`bJHIc0|2HQojbc z*noBjjAMnhu=v*5+ncZTQd+*jTkYvPt=bD5U7PcLf`8-zvpH;ME{g;RrtTD#e1*wL zVa-vbDaG2Sks~=sijYa3cDC-49Fux=_vdnu*myx@`g+$Rbpr#QGWvkG1{(<@o4+#1 zdpw18%KP?Z)i+jm7iw}@Kbs4Uh$f0is<^y zB2}1x$bWHNPe|fEBcL7_>sPo3@-UkrvD?t~JSd+oiF0TPC|gr{Wc(glpTXYCDNcEb zU*F%DwKMIE2`&!yeMX z*!~ScV@CobkOdum?(ZJ72tae?EAPBsIY+wgY)&BOCsJ+ff-KKs2_mIEITdtB@X|6u)hpT|zFxQyijJeYH|!}n zvFk+iAFYFb=AT~DN+cBnBe7mh5km67%hlZx=5_w8DjJ;j(_L(w71uu zBe&+4`W5JJ(R3rN3V* z!teNVwUd|E4`(=Yg7h)x>wf;%@0!j_SO4`Id%l?UK?{N34ZIC;=7vKC^tbAumB=#6*o6n{z)xw}IUv8s3`pfG49ymPU{9avlW*WdeVF@l4kD@byJ z!Cvq1g<*d8=j-tejJ7t>+k>|oy~CE`cY}z%-Wl$3o%?TWw!Fr+-tQS4@8x(7R$n8> zaoWegVIMDiBKDTsvisD>MR~;nkNn$a)@$!9w@eC*FL_RMy$y^)Ab<0W?wx|5jl`8+ z@`Pi1Z=uu(orAI7c%VIKYT`?W0f);>Vf{X zRZ!VF`mdbR))E#Y^M9{##Xj%&Z@)R5VOHvi#h>dpZZU`gbYXp2QfOTuqn8AzN9wjk z&(Sl|&5Tyf0t27>oviVySzx0-!JK9bqh_9VKEde6U-n^r>*x2mE-8KuK4*I)2}Zf! 
z`_6t1WsNolk3NckSRSd5&y+-ExGgMVl3q0X&6eD(fT4m`+r zzR{m8du9f12|8H)#Ve6~(&LLV$EdSd_+M+3>OiIhwFlf8i7n19g_qRDW_HmL7@-*O z9KPWrH!CHK@2!7Jdw$dX+rL@= z-|@wj-yUqh`hU8PF8{s1MKT9I4y;C@;;+ytk0H6B|Ef>&``DGekJ}t;ineT-b?yww zvD zrfD~SI`o)l@J7pElhpgWX(j3KJo(b+l~+3&TRS4k+JBMk^#*79yB@ozb`APi(B}&) zn*L+|joo`X@bu>#f{teW+DRIa<+>qKCka>%zPa0m&2_crb|>yM{2cz#d0+UnKUc94x8iws;UvGQT}$Ri=+Ih0}8ueIJe|7KYwNN&HqOoaKj=7+)i4k(`{cL>~ja9ItZeRRp1NDr;p&IZT&X}mfJ%c zo`1&ON~xkHAS}*U{b2dU4<6IqYN2b5@Qvg2kNe-8=D=Us`queA13!Cxs}*m2&n39X zaj-82Ez@u=`q%(@8X8S!vis+Gu=Q>JKGRN&66X}`IRRNSWav0Fc@`#hLG&~X8$J#a zx(brkBdGM^Y`sBdjN+1}^4R7GC|M-x*?%il>2+k*D=JIP(}KfuG9;dr9*+hbbnkIS5^6GikVYSXTRSvLKS4NV(2mi)2Z)z@WF_ zcVgi~6M$(l+ZGSV@-It5n)hNTcJ^$pEkmkq!+z$J)xbEvD&eEZd-Q4}5Mubrs?}%oLnYJ1~PB7I8Bod)c zB?ufwWUIgwF#)~z64)G)0)xzhFlj+?Tk zcmU$qxL2Y4SmvzUzgPwE5g)uYY%gf97-ie#nKb zt;*FuG(zKl)qSQ=O!oHYef!bs?;Loj<0!?cI7vk1oVpQ0Hul%C15Q9yEkIh`BAYH0 zofgPq(0}E66t@@cDS8Nvh?IiWB{Dxbz_fUYT%LkvvPhe~6}kBm0fPYJMf zZS&8%_Lq*;;(+`@_kTSHa;u4Mb)D)t@k&&^4czmHR%(mo!{-aiBbK@miX?hJH&&OL zE#JAorh6rI61yO zFxkY(!^;dxc)%7Hw`aD?Inukk%h#g}}4QU+x5G-bMFG;Q$KaI}Fu zCf$@{d3!#uz**yq6x_$f4$G&nm#&CVN#9ef4q$jlx_^9brR%)ktDd{?o~(vH$q5uc*hm_Ygx;`)JxmU-Afr1wRvzhLJ+UcBv9BM%@EJRc)fxms#$7_?TK^gK^|uZ` zY#SH)5b8gu8!T4VpTW@Zzt_(C7Ryy+ectq!y$!iq@uT1G6h!17cjsr1QMJj3Ft04-+ZJRPtV*|LrIe)`a2cJ(cDc*Aqe z1p-rV5q2?DE{&w8kH_*2!4+$fRtIPmUpDV{?fpK3>EngfuwOam9MGO$Zgu2U2aj2z z_5=T|EUz{GRa5K}*`l#M-(PkW@E{WHmQ{-Hbfl-1NandsC~^Ce{y``l{2GeU1(2TLT6&BqF3GT7lu% zT|MQ=o&6$=0Pd4qbZ=tab%e#cO=o+|5jdkuG9H@^NgOXR9+H(g_0y zo@TO~f-X4G+(gwh@}Tgb1dHPi6AI@e41c@%=)vlae%5PYc?j5vyC79VKik)63@?tA z28@7CEe{>av>5uos+)f5rW+l6!FPU^i3PgG7dtJdkG(gY<+tui4A@iOXVm*|=Lgrn z>6125RJxof*Of;)N0wic`HaZivwQ~5p!DoDb({)C9i?3!bVZeT%?#=094N6o0DmRW z)JJ@QeMss6H0?ObXS8fz<^*PZ!YccbN<^EF02dAF%z<|`g-+2hH8t(TSI`x6-{ebP zLK$d1_mfRE(=&IydWw4;MrLaa zG2r5Toa8;1{jH%mfA_Ln&wsd$2l8+I*ucHqLu%*&Z0#&o7H2+4>0@#Tl6`(hG@CB! z=Fjk05JgIML0KnZIsM`eWC+3!JbIL))+G!a9O(wdrZCZEyT<5L>ODJd;I?;vKIUh+ zN%jw4VM>o&=DnT!R&TPNT05@+TRT^uq4foO{t+a$KjOFMBmdE3{eOiuUC*@c@4eTc zqE204J@rSQxeA#6@Dmw<$zuj~c!0^g?69GxYcnIm<@hPZiuz2M z2mJ7tk1uX{<9)op>Z)zs_){DHyze(RV{=dzM5JE(rL+Q2j3lKf(aLJsc+!651TMI1 z0-8N3&!-ABpY7_0be_30D5rBO+uBdfGgIbEoX>_=t`0#FFdg>OhQVFEj4xsj+IZ6v-E2`{;Uol9Wht4CmT}>6ZCOrrh=h*|?Ih9Y!KWd-3RZ zZin>L_L6Z_PRCJIwDGxl_0RccM{<`ZtxOPo;)#)?cRt6E2SKSKoLE0DDaOSLNvHbe z8%o#qQ4jH5_x(ZFctF2_cj`R z9HK(LPz`*@H%q`3FE~d|W%*-nd9*X#Fj;t{>htp%hPnv$^fK$(gL3#i2Ris9)8oIJ zej2g~w|@YRO~Uyp(Guyt{~){dZGLN;bsrZlH2LOW_rKQQru*N=SxUSVo=GC!sO0<9 zG9_9tQkoTp({|Z$PN99rKq)37f;R{IU0V}VH%Pk$pSjVfks*>p?Lc%mKj_Bxd0K9H zbly!b5I6YM>S;-@!_TR0HMIHBzvc|&{&l~-?|;0g$F6$)z5g8x%b(capkEq8XUdM6 z_8K}e=5D0qOH@vCU*`g+jm1-_S8??p-ow{p5t-@dJe|I=&fT%DR!E9@Dh+ytTwz0p zeULW)1T-??1lZL2&(}f!*>j4QZ@w{;KRn5vPx{Bc@rL}STl>u$-SWKe8fa$tFYesl zyMJsnQf%Z7f9CeMi(oBY_F?|lk_RvnPSGfX@qna<=Ww> z(-g)oBH(U7>UZ1$$5R>g{H?b9?GR_*&DZP0KA#|6bE+-I)N2t~aG6qgt^;b^0edQK z&*Cz~+F8v=gZ~fCG_rao0P4d>4q@M)XMgZeU;N|^*ERTkcCFn`EEhNM**8ssEYMnD zFju=oL1FLndUmh*U7rWAQ7irrug^k-;sVbN?0OvLuj^YdBJTUXzU~+A(B~}w^f_uW zaPC)LsgoYF+I4HQ$^UnMX4fMoec#S6UzR3)-kYuWd6Huf#3k{a|GZ!O&-lH~G=B53IK4j%l>cl~uU){tl&_*i@}^yIUy_#Hor(I`h_TX!IN z?O%f>G1A75`+v?<_@%8x|Kz~LUiH4p_mhOeVI^tK2&+e|b6%KpdVxefdx`rzgBH^> z3SP8|$A?dXbpXjxDDU#LwSCF-zDCFx;WL^>hiNn42rZN1$>!V4&Q;M3`hT?j+0@v& zn#|9Dhq)?O`@8&OX2I>6yZ>;H?HmIq-Wwce1_%3}xQ|lqv*cR?ZqLCI1+w=gbLA{? 
zy?z1x7S9t`C&cPk|L6PWtHKFY@y_@8U1+4;$Bn0p7WBiPMJJ?OxGP*4f3YZj`JiIW zD|3nyU8Vb`^X1hCbz}7C=zqh^i`mBh4_j~Trgu;lc3m1EeSgZt&g$;#5P9pL{-sOr zinGgi%LN2Ps5Rc$ zIFO)0mt~WlrLlI+dL7DNd7+y8oZ0+VKaYc3ADjGT!*(tHod?;R!?AW86ZM@_FrCgp zi!7@0g5lyNEFBJ-FQ`3`R(}FI47+LYSB>+_2PP@hxRVD~It43fgn(kCwI3YxYtWh4 zPx2IL)zAI--}yDZ8-I;qSs)`((CfUqX6Pr!`JP{U5Wo4lYi}Fsac9fx6+bm+dBa9- zymoQ?Rm<33u0aQR zgg*-N5?84ret$o-sOQ_pLp-wm4pu+)*STH(^o|yTPfBn2hsE>6t@q>yGr7)!3Vt-~ zHs^UpL1FzALMg9(B>rIio$D-+t(}LTQv}6((1}XYJnT7XsY0P!^kF{7pO8OOT^CAH zEmG4_1lIq2Vr@UaOQ_*z#)N77=puc6bT$~lEg#M-o_{q0@9T<>&w4z;&@0D3XoQfy z&KEQMHPZZF8ls+;vi67veXY{!BOrBdd335n0@`xBO94{O3DP;zZ>yD=a#9sUT*PjU z8j0*RX#NRjMMv*0!&d%g`^SLa_{5pz(X&6mDUzRdpQy#JKBnly*!~RLAM5n?#Rq<8 z(1BFNKYw_^+;SDjjyi%iP}T2#A~`bSG=ZEUO(Qr@ zX&%I-*$hT>~ocblgH7H`>1Vb$lu z>N+Rmmwvp*MXvbP_q`7Le2p*P^-pwMF|Lq02urEi)e zbc@9j_fRE%Jw|| zuV*z~>uG_TfI&uHoKxNGnB_neKw>2p8GeU^&Xn}!caape#DzrR~QxPTndgntpv zg_)Q7gCXVHesT)l*N3sZ6Guq$KeD51Qffc6wdh+*FytWtnzOO_XbmM8HHAWedM?az zc3K5??g}h(0#bRT+e+Nek?}2GcD8(3-=A@oeZ7b*K794veZ6|N{Cl6PIq|KxV1Lsg z4!F$?i~QEF*_icHzbu)J)7m?uGJg=y(!srsLUuHit%r2`W`zg6pVxLEP>q?%719Ee zy*@C=8OXQKXHz32-)Lz2eeQ}F?ZpJS&4RN!0!ep~%r~fVb_BgmDp_cj+Br+PAW{(M z>}Qcre680reqKkY34M~7y6;=DeB)^9L(ub$ecplw{%mPy2+<)oO^&GXL4P&@k-GEv9blKy%H@|V~(fpYWIjrj?zqj!| zGW=HO9i@~ZUZ8hkYc~1jMbNL^Ya&~ATf6v`+Fo;t3G@5dngmCl(0d}et&79tLOFuy znaF%1%N*CyZsvs~r+;y!3(YcV`-#1<0#@Z^=0h9sjD{809>=ve%)ucm*Z`Vq z*#5Is$_T10wn} zxH%~ADQOZ4;tTgWV8lijiMBcd%Qxp12VSgYXvA%v>;2U}u75m7?;r1facax&c_(nw zWq$McH=7x7+3wdYpC7*NzMqX&e)l;KI+|-=Orl(uol8W)S<;T?KFEEn+rX>135j@Q z()|id=QF)n$yDw9v;OzF%}E`(fa}5urmA|*3#nGk;N@>q*hewD;^<#X2hI(UTJdGSKex}4M-d8k6Pe!`?@2_zNrnpA95{dp!P`>gIYM z`R`e}S1ds}?3#B4ea&>6<)SW1@+ijrQx z+u4Xbq4wTUXt+n9w)M?*E?xiqNgHDS#@GVt`v;$Z!LwV2eVpIi53XVsyu@xjht zmT*71OmWid6LP!W8PYesPjMFe{49ROXUmKAbAR!M@74!8KRwAW-qO&-$Ku4Ac8-m# zxSf^f*n@{?=N=L?XEq;3*$ZHQEakBc>~oP;<}kGL?Xvbyh_wL|~@Z{FDU@9(d@y2$R8)uDZ3hl|^c&})zG{#T#+ zd9L}tdUL;d=$o$RyVqa563s3CZgn|ney9a9W#Gy59@RA#iD@w6CwUnMeGhM1h5|$r zf-tGo;+mSLm#07^eIH;0RgOockoa74SbrA>oCt=b`10g3$BFH+rA9ioqtiXw9xYnA z6kc+KXg~zm+^ezNo>f2H{Xq-h{^8L=MWCl9QLy>l%S&|d=VjTfV>6x6viXFgnSor2 z%4+q1LTw;`*Bt%1_FZ|yoW#S`mk%1JUo&&#OYwS}Me(bj_1pI#W9O0Nc&kl-(tjY{ zc!zh}H-+tWPqxj*D+p-AZSU&lcxbYeGb|0=cxAHzxy4{~wk+Q^z%c=hkzT)Uyz&+4 z>w6r_w+(f4@0h*vZL`#T=-U`EI8pqdp#$PVCLugI2RyroE$aI=XwRYdIh^@5pXnd? 
zGpfXEAHRC6O8(X3fqrx{OJ?&D#D8UQE-I@_`|kHJ+@M5g3_1a6=SSvSNY-;Z4~ae; z-dWPE)kPzYOo5jw0Xg(=ERP+`K}+Gpm85**Km6JCbG>(ul!NmbXYtmDFa}w?2%{L* zx#!8|6Otq?x@i-ZhptH|6=S`C)H&hBIl+n#{Tft1Biz*4!h2gR&1tlA$$zMxq8T;O zNcWhH?`8DRYD1RBT=8Bu%PiNb$Hi=aSTQ)ENY@YPGVo=YdtM?^FHywK z$qkUV3cK(etCcYGNQh%uvGJS6Z!6r|B={|x?w+N-vM{%w6!)~ zdBZgZtoHDmR}og{2YsI91Q+H1#yH}p{q(gKX(o4!wjUmK+HBk+FxlP1!rA#49E)o} zXXk|HVwTE{$4-E{H$5AxlSkU8O{ZThFCHStXI<-|1U!pngP{^O&T@9nD-H!-Pj+{I zphlqEQLbGYVeX`G(|-ciJKJOKr)9fHI7V;|LTNq&+p}T>qiOMnKZ6~8`1;Hs6++@rsj34fe(EfP*fU(F0&&76!T z^bY4QF`^`>prcwKr<^uI>KVtm!Ev!cM(0?_T}UV=6&wZh1H-_d->s~jdJ;Y@FOwSU z@5FSr7faB+zq9l9EEYKQ5SPF5Zd3G_XkSMSJn`={X#Jz}J(tLyiA!-0mZSWQYug^Q zMF&t?w^b*on|~5!j>3;3OcliHKudc1fyndvHB2tST zt52HZ=6{RpHt1Sagm!MPY)g;0Bab!|Yk}reprCX6`5k_$k-QiLrs2tfMgCb?Z72@s zE@N3vi3}FSgoR#NcdV;%9G=E3oL`jjti7H4S#SONUU`;pe|^=@`v1`L{p{yn-0+RVEe8I-#GTuE z;=0nVFDM5#ge2s!w6QH40rCbgXNd_i7;qAj$Z+-RDVbe&k2%Je|Joh&)18DSSeB(y z)qhv-_se}qmDk?miw^rJ&EShTPq;uPNnGK2_yQ*P_ynH(W}yZ!Cv(>8WLRC%54+Sf zqfuN)tM%j9%(=Up(d;K;W=I>k5Xs(z6n!#^JA%1^3SzP?ch6b}EaES)LDr^ZB<4q@ z@>H2kCXR~_O7t78I`5Ra8Ha+C>e)n89)H5bbcU^+%IFxuln6Cnb*W}K-F_Hq8%QrdP|@5@ND*hwHQL` zl?VP0b)N3-X@5LeyFXD}yai1$_aI};tt8Bi?*Ve2+Wh$cYyE$r1%aH?5?YQoQ-7!5 zXSp#wDlx7yMG&qQ2M-y9tKxwJ=gbUQ9QU@cmF2j}N~*FcS@Xs#+jDNLgoc+MP4zE* z1u34HV&%QV3cd%nKc5X?S&@~Bp0M&lKBYL-)eaY)-=Ww_+%u3>I&pD*CW%{&ndZl# zRtbFWUw8~7NPCY=_lR-}-@`n$Uw@N3#dq+z$ND<Zul!T+zgN0KJx_BXteGTloo?$C>Vc+`Yzx0kMu+Pd)mBh z_u2z-!|1_!P^_j}moQj~M-IIWVdmQ-`@*BWKA{$vx7FaW|qV824VPfq?xSlCkpOlni3Vh zf(g1s{?mtZTzywlSAP$z&T)u~418d0^X7v&Wy-!L&hBj~R)5h_tXdxe`>elb^r+yW z)el~L8xH4s6EAyQmigf~X4+wx#5OXktJF|SeBRXTCPuhl3a#*~s ziyiW0-K4EGXMZ)IE(Rn-_^j@LzAKnvH{%%dF^8h^Z4OMH``=wbZ2x;EPC0)8ZJuA0 zHaCYHAAE*NPf6qo*!s<>o}e$e1SXHjPaPN@z{(JE(1Mt!yHxiAO3NYw?nZQdcNCnE zMrFiEpO%KKN)sUy4g0Z(37&ZmeLi@));WPvK7h$QI3HFi{Hz}2ar7XM<2#2Wb7I6- z0LBNbOR8K3)TLfn1(zK13on1pLj1}gTi@UHexAx-c|JD(-^Ddzi~Y9D@`k6DS8%ZW z%ZW%}=h5 z_}0VVCckplfVF2yI6b#5GxExL*=7+_y%5wQ8pSFrR#@K`^Wou{1WjE^C=@r63=`=F zM}b~KDuDgQHzswy^7Mbivl>yOlDW7qFs(&NtnN4I3c2W%d;XG_8-vC#bS~@44>o>|yxURAbg`o>J92?XN#~6^}6=A9+l7p0f!XGj|BM z_8ly5R7|mzhzgVFJmq>GNs69o-kd1O;g|6&=b+zx85=FieG}`NPSt5Yp3MD3*F>U@ z=&Kqee8FPWp)P-)hL*Qt+WORoA=alRZF*;4n43EFnLAX>P$NCi^1|5@?80F1Pd<i(@Jvc%T?$H* zIv?%&U0+wM^IRa;lU(B)?$P0LMi1TfsMH@6`POCSL!;;NxUY4o=^$}`oAc#MFp@_< zZu3J38-R}tBRXw$QSG?fdcZK1vs3=W=3s_NkN$d|Kl9ITd?4C>#qQOk2m2K~NS8OF%w$3DXT7t?>42RfWgP?w-o6r`e_b@_c%eh-Hv zp2)1X3fZEZ9dr|DH)wT!0Cx}^A$EWD%}R0K4~}=poVdJhy!-3djdz#VaUgGqKK;}u zzqcN}S<6`rHPnv%#5E6r7M$7~{>kU|C@SPS`Fio^n4&)g739r$u07W)IO2IFitF*! zpW^SwRsN&T?>r<#N^C>rv7Czoe+fyU0-_)mtpA_=ePOo$k6yhVnV!VEPI<9J;tUgEI$Ko!aGN8(x@sj5Z zDoWxGBvfUAeK)V#`za3gzS$a;=-&DvAt75(tcq_~$^#bvT!)_X`|q}s3IO$8%i?FWlGxQTkras zjZb*t6PXN@o?zX9!B@hdKo0jRB0BM7o%&R-FpeO`Ov5sl1ev4P`*FHwDqt`mgT~yC zW2|pwl*M^U8wiNDescu0afXdM_2X(9q(z@2LtKdgE9=zL?&&qIGi;7`^Fx1sF*@iM zVydBcP&1~gz+dU7AkOE0oL_&|h}Z`=Bu!n98rSB<(Z7c94BkxZUlZALo-91o3joh~ z$3OjJMDmBPP5BSLHst-*QwIFq-}xf`)7QAq2O8e7-Xg7EJNhQJdC_yO2-#Nl`6A+e zxX)igEy$-5+$ZlpMD?hoYDQFl9XK066nTif4@TG4Al_M3lq{V!>}#+L;a10Xxb2(4 zxPWPCeHBjgZQVZ4i?t8#2$|)y#Qxd+r$Em=WF#6zDVU#+$NiA0@F^@a!64%dYY%3R zeKONXaoZ^;LM7zi^KWUYZHk&WX}wZMgKkreF;t5UX^YnR}&WI4A|7urH)SKT;P1 z&qfeA504@m7?;Ud2z8W!Sb@jlzVuvlUlJAQK9#150`&e?+$V&m)YReB2*IF^h({!( zZg)tEvApj!p6*5CR1OIA!pJRRf130+oHP$H^k*X@Mwv$4v3ccS2B7B8ORM8<*Rf|E zb#NUU0rlE(z3@P%r~UyPr&dFJ<8gDt`t;B7-mQKwJmemZ={awB_7=eP|E4~O+^w^Y z;$EIKfU@b)R@Rz$$fELJWTk*V7T5ZruR#U-k_R?nCBtQe?||e{unX! 
z>czV7X#GfmKgbF~qOvFr(3>f&m@4St07F6{C5ODECMPVJENJTXX_+5IT7@G95%!;v zrij=oCqb%TMY8`0lH!4yoKu?bsVM`oDvk;EDd^xS+bK89_9-nzMv(`mbIDT3Jf8I# zdbnqDPnwAs9O{4?6HK!#e|}eL;3w#f26N9;rQg)bi}$Z_16X@{^%FOUu)m;bYX?E2 z-=f%!Xwug_7Gp}Xm1uO#bm@thyAk%OV7?TV&J*}UywBE$9H6rM@=1pw&N>WSzp!Wi z8bcj^jWg_7?8c`IS&s6;S86pfr#y-F=9EA@NjtfxBpEA(nx802e>m|D^P!}DN{skn zAZYMn1UFa{7oHbq+kviDeq8v3=w)y)32RjF((Jt#A|uZ4e0JO;a5W087v-LfHruzt zts_WW;m7sp{b?V1N{rDT(OYF@Xo@A2W(8_P<}ScEwTO$*CG=|ipo zkWX{**>1U@H+KmuCOmdkjw9B_xNpSTE9G) z*3+QLKf*&k<5ZO1$|G+{Z-#k7y4rixtr`L)dZsUCYa=y%j}JGX*gtZ;cYh4 zK`Q4#qS0$KcYgY@XKK$&xCa>yp-8C;zdU6oK~n9eX~uitG7(tF2Zizx$gU(=}f@`HgSU`s5weFZmj~b{+Bi7mtc5Kl`Yi z_HR3XkLPtHf4^lLxBEJtV~^R|JF#v~>zVbew)grJOIemdi97@J6v|J%(Cz(OnoJPm zmMPXRbk4kkc?&7L27o$9Qv|4sA!5L6U%LpxM1-)Gc}la=GjhaK949^KHGZAo&7x=D#W)@|3S4x_20Ie?(y~cx?0LLqNO4lGAL$)s(q! zfi{jfynotw8M4`+!*|US=twt09yof}d<9y+RI9$GD&urjk43C!!syp%RV*m+0?`Oe z0x@O`x8CkLxO>K2sGfwxj3ZCwiX^%s2fk)TdIUW5ew(smu_Q6~p&PHs@qH3*Tvx1% zchCa&e|=6N$Hc}6bx%-a&&dk)ZFvCq@_qzjXQoCY8$XVsX+ToA2!2t!k%A6$%{ z?foTRnUbW%2eXgAP@?Pwq>(w%c-Y}eQV49W{4*cQ#0=0c&mR;q)kVS%YC1Ls{Q!nz z&=FN~NC-qhqY_WMGr%?|!V_XD`4RIBbGPHt%mdn=QROxR7(P{v@d$hW=AdRFl{F{s zf7-J2KnyANi)oYcQYdjSYmfBqCIqiX?*fyJ0@dG~xXc}RLOzT&t^w4jpYuk)_U{&_ z997S|dN&p&Ji*veip>w;d&&ejhxm39Fe8(=n6~iPp7%_lmexDiHfA$+L`wqNG`fbn`7E-MDw4M$5Z9m$~Be=%o zZTwW`s5#4!U)jb?MT}kuT-m+ym1<6j5&={l!?2GU&orgCPmBm`JyO*^oyCNp?%ovN z=~|FoDLGLJ>sS=x_#sn!Moga&UC?wVQ}hP|ZC{hYft2*eT`}FG_xjxr%fj|Ye>hIY z_7FIz*9Yw9Kk&BKj@nw(x%VCaoa5t`CwPhrtf&1?CkRpB6e~%LV3f?aF;rpf=Tl-7 zCB{rh`Mh`3_{-NtCpp3o_U}`Zg&vu(m$*Q0`q@h+CmcQUjGQhfwtf>k$BOij7sN6E zn&Ezm+6p`?w=qbgu*T+J=LWJ2f7BKk754Mv{Aswt{DW5(aT|{W^Xqtj?YW&7t`DkX zQuX_I3!fp7x~Qy~+GboU#6==L7R3(r4r}f8XC9J_Y0x zhTg75Pv?~iX%=rijj8QIdMaPOjyo&p7Mhe9-b)H)xQR`l@oKR_Pq)6RIo_W|N3tw% zr(pITjnzbTs~M`*z3zs2v5bh?GjG>XfAehlEw4Ga>qD>xqae2~8lvUHpfBtC~lps{+;ezva zUXUU}JtzJw>6#c{OO;{-D$zp7?X-`_!wY#-Y*!JXY? 
z_&|GKB-I1ROh)v>cvfW(R15TY&|vm!8Y@Q3{*xNvt$&{G7MPm{J;d7m%DrC?5_b@k zWH-I^AGz$HPuB+=G z@*U8do)*pD$foALm*;CsaLvPP*eF35W9+%0gf8|`ClYITf~w4*k- zj}FZ~fJTLq>^W%d({=AzsAq39G^Td86K3kSmq`)h zn5LZvZ>099U3~#lSAXrgaPNe8g`Wj-Zeh4qznq&q)?-5hxAPRw)yJzB&1IfmaudSj zm;7qjxb}070DTrGS^6LjkRxlw-aF##(z~@1vyim+&I8F=T5_HcGQv1YVd7+O);iDl zWV7>#8iV51gx|$d`wVGscO4oJ{z;E%7Yx6JH zzPcMDK;g8kYbY9D#5GZ+R}ef4Lq)dw4yaHlEAZB0im$sXWKsr`~=WH{Fr+W*j;9AJZb4XMe>QnlPB7Um)h;K>MD`-(HlY4l^(8xc~6W`T_DzrOa^r*5r(uEF$JT zEK1`Oetzz8)hCQOU^P6dywe$PjXL8~& z{mSXN2bHtj^vBLvdVyKU5OM}3NTM^R#50rvJPYL%vRC3hlw2vFz+;wS&fE6IUaoJ7 zYs$_2z<-#^`k7;|i`~a`bo9XZByGAL24!a1->KdROz)VLg$GMDAeasSLx{?hjl&P~ z!&tjFaWo$xAMO-KU+d=WI%H!R8!vZdKzsW>QMt5ZSS}Lj91l-%9S~EDh&N4`$}rdK z6p8g~KgEmD5=ahKD)gCk-YzBB9uNdQ57wZw!XhLc621eu& zXzO?1?q7~G)*-_>FS5%$)8n|;Lc=eb;!D3E{$V|O(qXlABz4ZM7mT<6j&WR0`WE&8 z9OF{thYdiP?ikN6y5O|4VtyL~;=b(py&)RCqrZRZP%$MzRg&o}49gycvffbTAMsw} zeg&x@Qtc0c(E(G&C#&x=H^T1y5PR!7LB@v}5~HTgRtCMAQGR4-DMpwZ@6n`pYH3K2Ip|c_O73}BjEw(wjRM&M)Dv%^Z^7tQSS*9 zJobO^-XcRMv-kF=yI>{IwW^yKls`PG$eBq6ll_pGx*NCW7e*eQJ*EqfL-ta7`w)!g z=X=UbCc>u2pv30>46kSD9D7umsMqT2IC^5ISMR$v7i912`Fw%(iS_0DncA8mo-tyH z*WKpD8{UOEmsD{%^I^oyzDb}((*F39ygFigudX(66T&q(MWj4J&-zS~XnZbX|z3_j) zPrcT~Or)p2Y|TG0AH|g_hRc}?Mtuj6VGj}~Jn=-V2DDM->e$2QH}G^;43}x=FJ2eX zQk5$$zUcBk6i1ZLW-F@1JxxT1>be6*a;jlL~k-Es)+bJMPaaeH2CwGu7w8Q;hLzkp-x)z)(Ex#BCV!=m=}U4J9Uv z#}yLno`Ea$6Ee@krdlGKokKB`WxW`sMgl%JRdyt%IL0xa)r1_g&0=xLG#G!e6iSOQ z0S0Biq%b&I@7=~r$`ubly9sC>`0JOc^Oa181i45eVM+vk(wm62`^-!6JdWL$L8C9P z4_2-R8|@9^-u3nLd8(4)k$~z7vwNQEYB$n~4_7jT54&#y>huRzzf@bDI=|`l7pd(T zJnb!Dy-7n2WJqDc8zCvw9vpwn9#Z(EWiceGmpDI$kyv@)JUzqw{;n^|C-~sV4PlE} zeDsfd4XUDCr_U4gs5XkeJ5P6@g1epaEDt5ZzR7it9FcqEv$p5(-*tDFI@`r#?&@oD zs>>dW+g|gwo^ibPp5OGT+y8ec<($BCD4Qx^l>1p%PKVxmoUbRo-{gO|N6Zp8ro1WM zJT78r;<4YDts?S0EWTo*_9lr{=9OW^U_Kl)GUYnxd$EXEI>+8u!g^o3Q77tT)qWEt zu%_`|`~HHDwaZDf;It#V#X$~2VgeEGH-~MdAcd{iEQFZ!3CrAaoK{z^$x>n88pdx) z+m~F#_H1ylF;*;+)8Bs>SG{mNcMaSjA2IFwPOV2wdp&1Y(@k6l6qe5~kcFTyhsu{N zx1^8yyPfkS$PpyRxIWg%3qYQfJe@DqnW=YkPm1krP3X#5IM++%!NcC;vmdJE_u)Ed z{X^N+!vKS*w#G#o#UR^bjYbgEm4Q2;#>{!5x4h|f<3z0(C=Y+@%tY%UHG4E+*#>3( zAuIS4pg$N9hcMP7P->nva!>F3FVR7-0+KU`B_qbm;8PC+(;tH25vO9uNWGgpPTwZ0 z{hL&M4$4&_GAc?~g+{*)yqCAi8wMh&k^#sRjdo z{c}fQ@Bqu2zJp?@A91p_M+ZL35$a_f%$eD98p)hzO=BYp+aJX2xdvJCXCIj0TI$^VnAkz3ZuQ0|L)?6YSMgB(er^$f@HHe&V8p+_`LK< z8Ga8<7O5f{rDYe(K4^cDD^j-}&BDZU>E+~sjFK@8^&O>}|CwT7q z%<-Iuv2gdo6PTzGR2i}Yp75-5J{OM0&oA&L$TPt5`kdoE;g@@J!#3;u^33ntF>{`# zkkswpD)X^%5`La0*tZ!uPZL` zJO!pxe4b%qkZ&Z38 zM@)Y)y=6Cqzi`d+)-%hTSc+IDKm6h=G<{HRj;t*f%OOe4t8a3j#aRS3_=8Oya@`jM zhAxZch&8{xCYg9aCcb^B5@vPY{H-h@rHT^{CSIDHM~%#&(JxesXDIVMRlTvN1f7Un zG1VWl*mHKEye@u{!j z9$pRk0T}+}@6~>P!5{Fwn{1*UhCiFQNKG&EJTu1pv?9VeYBo_)w|?u`pIl79NIQSm z!4xNr!}**c54;rjLp`JvYV!ED=geTsE%$ikdDweI%=8P2r!km<3D^hZr?+s*8e6=! z1M~)A>WCUnr_RZT_6<=zT%R$2L0#-$X9BQ&V0cuwp0_w3&V70Qji$flq@K>I1Ea3m ziAG)e+s`%e6Qi~__rlEt`Z+m#?MHt`uL)?y^?N!l>!-oW%zeDKO`XK9SP!hfeHKi1 zy_HD06K3kkQ{{9_>ZdWooW-6G)<5|)-&FjN%OulYh`!NVSyMI170HLNmi~~uqgNiK zaEMt`9%6U%1tmeQjyLr()QUy9!akv;^Q=Y&eJer(UGzf&B5!)8ZsM<7k6(Z5ul9>} z#5?&}o_s6Xo=>5F@v%E|jkg)u28>LaOBLs7er?y3m+n9 zzbBRx8!R1S=n^@x$aM6uh90+l+kB4NULH~926w7Q9(1r}%IObroe+PiLy$G_-9v}M z^(DXFE2%6B!*O*n0Nac9pak+jcN>+5ZO zYS9~YWx1gV*Zhu{N)UhReJv7#9gW2gC$-_sgdlDTPOr z9$>mV7Eg=m+D$0R5p_-?#n)Acgw){jfK zSP{)9Y~NqX9N8Xx7t;DLX7W8!ufv1t4`ao%IO&&SwTQx`=Nv3Wd5m>W7DGZ!g-t+-DEC!( zXpr$yctpHlU7KniQOI-gapxB=;j&bHGKk&9AFol>r+8mJ_c!&9s$n;#HW6ydHSsv2cv>66f-Vm6aQol@1PkK-_oCMKMzLUi?FU zI~&jSdRdzsH+17 zbPq#y9%nnAx0>*lE!~cB%LqE>3fHpbHnE(v@xEmaZE>vC8vP3DRdASlii%~RRW~&_ z&JS4(^(u