From aa518098d4b85705c4fab58067b4c6dad9fa3ee0 Mon Sep 17 00:00:00 2001 From: Hina Chen Date: Thu, 15 Feb 2024 10:22:46 +0800 Subject: [PATCH 1/2] Update localization for zh-TW --- localizations/zh-TW.json | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/localizations/zh-TW.json b/localizations/zh-TW.json index 964655ecb..6ecaa6cbb 100644 --- a/localizations/zh-TW.json +++ b/localizations/zh-TW.json @@ -24,6 +24,8 @@ "network dim for conv layer in fixed mode": "固定模式下卷積層的網路維度", "Sparsity for sparse bias": "稀疏偏差的稀疏度", "path for the file to save...": "儲存檔案的路徑...", + "Verify LoRA": "驗證 LoRA", + "Verify": "驗證", "Verification output": "驗證輸出", "Verification error": "驗證錯誤", "New Rank": "新維度 (Network Rank)", @@ -137,7 +139,7 @@ "(Optional) eg: 0.5": " (選填) 例如:0.5", "(Optional) eg: 0.1": " (選填) 例如:0.1", "Specify the learning rate weight of the down blocks of U-Net.": "指定 U-Net 下區塊的學習率權重。", - "Specify the learning rate weight of the mid blocks of U-Net.": "指定 U-Net 中區塊的學習率權重。", + "Specify the learning rate weight of the mid block of U-Net.": "指定 U-Net 中區塊的學習率權重。", "Specify the learning rate weight of the up blocks of U-Net. The same as down_lr_weight.": "指定 U-Net 上區塊的學習率權重。與 down_lr_weight 相同。", "If the weight is not more than this value, the LoRA module is not created. The default is 0.": "如果權重不超過此值,則不會創建 LoRA 模組。預設為 0。", "Blocks": "區塊", @@ -145,6 +147,9 @@ "(Optional) eg: 2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2": " (選填) 例如:2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2", "Specify the dim (rank) of each block. Specify 25 numbers.": "指定每個區塊的維度 (Rank)。指定 25 個數字。", "Specify the alpha of each block. Specify 25 numbers as with block_dims. If omitted, the value of network_alpha is used.": "指定每個區塊的 Alpha。與區塊維度一樣,指定 25 個數字。如果省略,則使用網路 Alpha 的值。", + "Conv": "卷積", + "Conv dims": "卷積維度 (dims)", + "Conv alphas": "卷積 Alphas", "Extend LoRA to Conv2d 3x3 and specify the dim (rank) of each block. 
Specify 25 numbers.": "將 LoRA 擴展到 Conv2d 3x3,並指定每個區塊的維度 (Rank)。指定 25 個數字。", "Specify the alpha of each block when expanding LoRA to Conv2d 3x3. Specify 25 numbers. If omitted, the value of conv_alpha is used.": "將 LoRA 擴展到 Conv2d 3x3 時,指定每個區塊的 Alpha。指定 25 個數字。如果省略,則使用卷積 Alpha 的值。", "Weighted captions": "加權標記文字", @@ -203,8 +208,8 @@ "Dreambooth/LoRA Folder preparation": "Dreambooth/LoRA 準備資料夾", "Dropout caption every n epochs": "在每 N 個週期 (Epoch) 丟棄標記", "DyLoRA model": "DyLoRA 模型", - "Dynamic method": "動態方法", - "Dynamic parameter": "動態參數", + "Dynamic method": "壓縮演算法", + "Dynamic parameter": "壓縮參數", "e.g., \"by some artist\". Leave empty if you only want to add a prefix or postfix.": "例如,\"由某個藝術家創作\"。如果你只想加入前綴或後綴,請留空白。", "e.g., \"by some artist\". Leave empty if you want to replace with nothing.": "例如,\"由某個藝術家創作\"。如果你想用空值取代,請留空白。", "Enable buckets": "啟用資料桶", @@ -227,6 +232,8 @@ "Flip augmentation": "翻轉增強", "float16": "float16", "Folders": "資料夾", + "U-Net and Text Encoder can be trained with fp8 (experimental)": "U-Net 與 Text Encoder 可以使用 fp8 訓練 (實驗性功能)", + "fp8 base training (experimental)": "使用 fp8 基礎訓練 (實驗性功能)", "Full bf16 training (experimental)": "完整使用 bf16 訓練 (實驗性功能)", "Full fp16 training (experimental)": "完整使用 fp16 訓練 (實驗性功能)", "Generate caption files for the grouped images based on their folder name": "根據圖片的資料夾名稱生成標記文字檔案", @@ -498,4 +505,4 @@ "Training comment": "訓練註解", "Train a TI using kohya textual inversion python code…": "使用 kohya textual inversion Python 程式訓練 TI 模型", "Train a custom model using kohya finetune python code…": "使用 kohya finetune Python 程式訓練自定義模型" -} \ No newline at end of file +} From 40d7b605da10cf089a5864a48e583174bd4db4a6 Mon Sep 17 00:00:00 2001 From: Hina Chen Date: Thu, 15 Feb 2024 10:29:00 +0800 Subject: [PATCH 2/2] Update some typos --- README.md | 22 +++++++++---------- examples/caption.ps1 | 2 +- examples/caption_subfolders.ps1 | 6 ++--- ..._train_db_fixed_with-reg_SDv2 512 base.ps1 | 2 +- library/wd14_caption_gui.py | 6 ++--- 
localizations/zh-TW.json | 4 ++-- setup/debug_info.py | 2 +- textual_inversion_gui.py | 2 +- 8 files changed, 23 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index c95a930a7..21b4c0c2f 100644 --- a/README.md +++ b/README.md @@ -509,7 +509,7 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b - `safetensors` is updated. Please see [Upgrade](#upgrade) and update the library. - Fixed a bug that the training crashes when `network_multiplier` is specified with multi-GPU training. PR [#1084](https://github.com/kohya-ss/sd-scripts/pull/1084) Thanks to fireicewolf! - Fixed a bug that the training crashes when training ControlNet-LLLite. - + - Merge sd-scripts v0.8.2 code update - [Experimental] The `--fp8_base` option is added to the training scripts for LoRA etc. The base model (U-Net, and Text Encoder when training modules for Text Encoder) can be trained with fp8. PR [#1057](https://github.com/kohya-ss/sd-scripts/pull/1057) Thanks to KohakuBlueleaf! - Please specify `--fp8_base` in `train_network.py` or `sdxl_train_network.py`. @@ -522,15 +522,15 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b - For example, if you train with state A as `1.0` and state B as `-1.0`, you may be able to generate by switching between state A and B depending on the LoRA application rate. - Also, if you prepare five states and train them as `0.2`, `0.4`, `0.6`, `0.8`, and `1.0`, you may be able to generate by switching the states smoothly depending on the application rate. - Please specify `network_multiplier` in `[[datasets]]` in `.toml` file. - + - Some options are added to `networks/extract_lora_from_models.py` to reduce the memory usage. - `--load_precision` option can be used to specify the precision when loading the model. If the model is saved in fp16, you can reduce the memory usage by specifying `--load_precision fp16` without losing precision. 
- `--load_original_model_to` option can be used to specify the device to load the original model. `--load_tuned_model_to` option can be used to specify the device to load the derived model. The default is `cpu` for both options, but you can specify `cuda` etc. You can reduce the memory usage by loading one of them to GPU. This option is available only for SDXL. - The gradient synchronization in LoRA training with multi-GPU is improved. PR [#1064](https://github.com/kohya-ss/sd-scripts/pull/1064) Thanks to KohakuBlueleaf! - + - The code for Intel IPEX support is improved. PR [#1060](https://github.com/kohya-ss/sd-scripts/pull/1060) Thanks to akx! - + - Fixed a bug in multi-GPU Textual Inversion training. - `.toml` example for network multiplier @@ -556,7 +556,7 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b - Fixed a bug that the VRAM usage without Text Encoder training is larger than before in training scripts for LoRA etc (`train_network.py`, `sdxl_train_network.py`). - Text Encoders were not moved to CPU. - + - Fixed typos. Thanks to akx! [PR #1053](https://github.com/kohya-ss/sd-scripts/pull/1053) * 2024/01/15 (v22.5.0) @@ -574,10 +574,10 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b - IPEX library is updated. PR [#1030](https://github.com/kohya-ss/sd-scripts/pull/1030) Thanks to Disty0! - Fixed a bug that Diffusers format model cannot be saved. - Fix LoRA config display after load that would sometime hide some of the feilds - + * 2024/01/02 (v22.4.1) - Minor bug fixed and enhancements. - + * 2023/12/28 (v22.4.0) - Fixed to work `tools/convert_diffusers20_original_sd.py`. Thanks to Disty0! PR [#1016](https://github.com/kohya-ss/sd-scripts/pull/1016) - The issues in multi-GPU training are fixed. Thanks to Isotr0py! 
PR [#989](https://github.com/kohya-ss/sd-scripts/pull/989) and [#1000](https://github.com/kohya-ss/sd-scripts/pull/1000) @@ -592,13 +592,13 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b - The optimizer `PagedAdamW` is added. Thanks to xzuyn! PR [#955](https://github.com/kohya-ss/sd-scripts/pull/955) - NaN replacement in SDXL VAE is sped up. Thanks to liubo0902! PR [#1009](https://github.com/kohya-ss/sd-scripts/pull/1009) - Fixed the path error in `finetune/make_captions.py`. Thanks to CjangCjengh! PR [#986](https://github.com/kohya-ss/sd-scripts/pull/986) - + * 2023/12/20 (v22.3.1) - Add goto button to manual caption utility -- Add missing options for various LyCORIS training algorythms +- Add missing options for various LyCORIS training algorithms - Refactor how feilds are shown or hidden - Made max value for network and convolution rank 512 except for LyCORIS/LoKr. - + * 2023/12/06 (v22.3.0) - Merge sd-scripts updates: - `finetune\tag_images_by_wd14_tagger.py` now supports the separator other than `,` with `--caption_separator` option. Thanks to KohakuBlueleaf! PR [#913](https://github.com/kohya-ss/sd-scripts/pull/913) @@ -612,4 +612,4 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b - `--ds_ratio` option denotes the ratio of the Deep Shrink. `0.5` means the half of the original latent size for the Deep Shrink. - `--dst1`, `--dst2`, `--dsd1`, `--dsd2` and `--dsr` prompt options are also available. 
- Add GLoRA support -- \ No newline at end of file +- \ No newline at end of file diff --git a/examples/caption.ps1 b/examples/caption.ps1 index e61f9b310..073315594 100644 --- a/examples/caption.ps1 +++ b/examples/caption.ps1 @@ -1,6 +1,6 @@ # This powershell script will create a text file for each files in the folder # -# Usefull to create base caption that will be augmented on a per image basis +# Useful to create base caption that will be augmented on a per image basis $folder = "D:\some\folder\location\" $file_pattern="*.*" diff --git a/examples/caption_subfolders.ps1 b/examples/caption_subfolders.ps1 index 0bfba6f01..347195e8b 100644 --- a/examples/caption_subfolders.ps1 +++ b/examples/caption_subfolders.ps1 @@ -1,19 +1,19 @@ # This powershell script will create a text file for each files in the folder # -# Usefull to create base caption that will be augmented on a per image basis +# Useful to create base caption that will be augmented on a per image basis $folder = "D:\test\t2\" $file_pattern="*.*" $text_fir_file="bigeyes style" -foreach ($file in Get-ChildItem $folder\$file_pattern -File) +foreach ($file in Get-ChildItem $folder\$file_pattern -File) { New-Item -ItemType file -Path $folder -Name "$($file.BaseName).txt" -Value $text_fir_file } foreach($directory in Get-ChildItem -path $folder -Directory) { - foreach ($file in Get-ChildItem $folder\$directory\$file_pattern) + foreach ($file in Get-ChildItem $folder\$directory\$file_pattern) { New-Item -ItemType file -Path $folder\$directory -Name "$($file.BaseName).txt" -Value $text_fir_file } diff --git a/examples/kohya_train_db_fixed_with-reg_SDv2 512 base.ps1 b/examples/kohya_train_db_fixed_with-reg_SDv2 512 base.ps1 index 28aa1e70a..98fb8711c 100644 --- a/examples/kohya_train_db_fixed_with-reg_SDv2 512 base.ps1 +++ b/examples/kohya_train_db_fixed_with-reg_SDv2 512 base.ps1 @@ -61,4 +61,4 @@ accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process tra --seed=494481440 ` 
--lr_scheduler=$lr_scheduler -# Add the inference yaml file along with the model for proper loading. Need to have the same name as model... Most likelly "last.yaml" in our case. +# Add the inference yaml file along with the model for proper loading. Need to have the same name as model... Most likely "last.yaml" in our case. diff --git a/library/wd14_caption_gui.py b/library/wd14_caption_gui.py index f58edb2e7..ae171a8ec 100644 --- a/library/wd14_caption_gui.py +++ b/library/wd14_caption_gui.py @@ -123,7 +123,7 @@ def gradio_wd14_caption_gui_tab(headless=False): value='.txt', interactive=True, ) - + caption_separator = gr.Textbox( label='Caption Separator', value=',', @@ -199,11 +199,11 @@ def gradio_wd14_caption_gui_tab(headless=False): ], value='SmilingWolf/wd-v1-4-convnextv2-tagger-v2', ) - + force_download = gr.Checkbox( label='Force model re-download', value=False, - info='Usefull to force model re download when switching to onnx', + info='Useful to force model re download when switching to onnx', ) general_threshold = gr.Slider( diff --git a/localizations/zh-TW.json b/localizations/zh-TW.json index 6ecaa6cbb..df172cb9f 100644 --- a/localizations/zh-TW.json +++ b/localizations/zh-TW.json @@ -51,7 +51,7 @@ "Show frequency of tags for images.": "顯示圖片的標籤頻率。", "Show tags frequency": "顯示標籤頻率", "Model": "模型", - "Usefull to force model re download when switching to onnx": "切換到 onnx 時,強制重新下載模型", + "Useful to force model re download when switching to onnx": "切換到 onnx 時,強制重新下載模型", "Force model re-download": "強制重新下載模型", "General threshold": "一般閾值", "Adjust `general_threshold` for pruning tags (less tags, less flexible)": "調整 `general_threshold` 以修剪標籤 (標籤越少,彈性越小)", @@ -101,7 +101,7 @@ "folder where the model will be saved": "模型將會被儲存的資料夾路徑", "Model type": "模型類型", "Extract LCM": "提取 LCM", - "Verfiy LoRA": "驗證 LoRA", + "Verify LoRA": "驗證 LoRA", "Path to an existing LoRA network weights to resume training from": "要從中繼續訓練的現有 LoRA 網路權重的路徑", "Seed": "種子", "(Optional) eg:1234": " 
(選填) 例如:1234", diff --git a/setup/debug_info.py index a4c26c4a5..e46bd72ca 100644 --- a/setup/debug_info.py +++ b/setup/debug_info.py @@ -51,6 +51,6 @@ # Print VRAM warning if necessary if gpu_vram_warning: - print('\033[33mWarning: GPU VRAM is less than 8GB and will likelly result in proper operations.\033[0m') + print('\033[33mWarning: GPU VRAM is less than 8GB and will likely result in improper operations.\033[0m') print(' ') diff --git a/textual_inversion_gui.py b/textual_inversion_gui.py index 7e9d7c7b9..3362a750d 100644 @@ -747,7 +747,7 @@ def ti_tab( with gr.Row(): weights = gr.Textbox( label='Resume TI training', - placeholder='(Optional) Path to existing TI embeding file to keep training', + placeholder='(Optional) Path to existing TI embedding file to keep training', ) weights_file_input = gr.Button( '📂',