Mirror of https://github.com/natelandau/obsidian-metadata.git (synced 2025-11-16 08:53:48 -05:00)

Compare commits: v0.2.0...dependabot (89 commits)
Commits in this comparison (SHA1; the author and date columns of the original table were empty):

c19e2a99a4, 42083f3dc3, 64ac9a3ea3, 8f7d4bb015, ba5693cf61, 28c721f6d9, 10449b3e6a, 22e9719402, 461a067115, 34aa78c103,
d78e5d1218, 476ca62e5c, 30009ada8f, 00990db77a, ac487db3fd, b762c34860, 2d15760096, dbf1cc8e13, 2e61a92ad1, 9ec6919022,
72fef38b0f, 4df10e785e, 5a4643ea8f, c5766af678, 375dceb8c6, c75d18200e, ffdac91537, e8f408ee33, 1dd3ddfb22, 8968127c95,
4bf1acb775, 98fa996462, fdb1b8b5bc, 08999cb055, 4e053bda29, fa568de369, 696e19f3e2, 7b762f1a11, c1a40ed8a4, 6f14076e33,
ca42823a2f, 36adfece51, d636fb2672, 593dbc3b55, 009801a691, 2493db5f23, a2d69d034d, 556acc0d46, 8cefca2639, 82e1cba34a,
7f431353e1, 4e49445b08, 5f9c79a9c1, 34e7c07dd9, 32a838c8e4, 000ac1a16c, 1eb2d30d47, b6a3d115fd, 03e6ad59c4, 0b744f65ee,
bf869cfc15, bd4b94aefa, 3932717c7e, 755151e2ed, 8f8174a902, 3bbcf3a987, 347dd4271f, 167997f527, 0143967db8, 446374b335,
401d830942, 7eb8ff5fa8, 2cca54320c, d94d9f2197, 17985615b3, 13513b2a14, b7b77d998c, 0de95a4be4, 90b737f7b3, 8e040aeba4,
4a29945de2, 6909738218, 1977ae362c, c0d37eff3b, 48174ebde9, eeaa1e7576, ac0090c6c9, 42dd73b038, bc394e2d77
.devcontainer/devcontainer.json

@@ -39,10 +39,7 @@
         "--exclude",
         "'tests/'"
     ],
-    "python.linting.ignorePatterns": [
-        ".vscode/**/*.py",
-        ".venv/**/*.py"
-    ],
+    "python.linting.ignorePatterns": [".vscode/**/*.py", ".venv/**/*.py"],
     "python.venvFolders": ["/home/vscode/.cache/pypoetry/virtualenvs"],
     "ruff.importStrategy": "fromEnvironment",
     "shellformat.path": "/home/vscode/.local/bin/shfmt",
@@ -55,29 +52,30 @@
     },
     // Add the IDs of extensions you want installed when the container is created.
     "extensions": [
         "ms-python.python",
         "bierner.markdown-preview-github-styles",
         "charliermarsh.ruff",
         "donjayamanne.githistory",
         "eamodio.gitlens",
         "fcrespo82.markdown-table-formatter",
         "foxundermoon.shell-format",
         "GitHub.copilot",
         "Gruntfuggly.todo-tree",
+        "GrapeCity.gc-excelviewer",
         "mhutchie.git-graph",
         "njpwerner.autodocstring",
         "oderwat.indent-rainbow",
         "redhat.vscode-yaml",
         "ryanluker.vscode-coverage-gutters",
         "samuelcolvin.jinjahtml",
         "shardulm94.trailing-spaces",
         "streetsidesoftware.code-spell-checker",
         "tamasfe.even-better-toml",
         "timonwong.shellcheck",
         "Tyriar.sort-lines",
         "visualstudioexptteam.vscodeintellicode",
         "Chouzz.vscode-better-align",
         "yzhang.markdown-all-in-one"
     ],
     "features": {
         "ghcr.io/devcontainers/features/common-utils:1": {},
@@ -89,9 +87,9 @@
     "remoteUser": "vscode",
     "postCreateCommand": "bash ./.devcontainer/post-install.sh",
     "mounts": [
-        "source=${localEnv:HOME}/.git_stop_words,target=/home/vscode/.git_stop_words,type=bind,consistency=cached",
-        "source=${localEnv:HOME}/.gitconfig.local,target=/home/vscode/.gitconfig.local,type=bind,consistency=cached",
-        "source=${localEnv:HOME}/tmp,target=/home/vscode/tmp,type=bind"
+        // "source=${localEnv:HOME}/.git_stop_words,target=/home/vscode/.git_stop_words,type=bind,consistency=cached",
+        // "source=${localEnv:HOME}/.gitconfig.local,target=/home/vscode/.gitconfig.local,type=bind,consistency=cached",
+        // "source=${localEnv:HOME}/tmp,target=/home/vscode/tmp,type=bind"
     ]

     // Use 'forwardPorts' to make a list of ports inside the container available locally.
.devcontainer/post-install.sh

@@ -17,14 +17,12 @@ _mainScript_() {
        iotop
        jq
        less
        libmagickwand-dev
        libxml2-utils
        lnav
        lsof
        nano
        net-tools
        openssh-server
        p7zip-full
        python3-pip
        shellcheck
        unzip
@@ -34,21 +32,29 @@ _mainScript_() {
     )

     echo ""
-    header "Installing apt packages"
+    header "Install apt packages"
     _execute_ "sudo apt-get update"
     _execute_ "sudo apt-get upgrade -y"
     for package in "${APT_PACKAGES[@]}"; do
         _execute_ -p "sudo apt-get install -y \"${package}\""
     done

+    if [ -d "${WORKSPACE_DIR}/.venv" ]; then
+        echo ""
+        header "Remove existing virtual environment"
+        _execute_ -pv "rm -rf ${WORKSPACE_DIR}/.venv"
+    fi
+
     if command -v batcat &>/dev/null; then
         echo ""
         header "Favor bat over cat"
         _execute_ -p "mkdir -p /home/vscode/.local/bin && ln -s /usr/bin/batcat /home/vscode/.local/bin/bat"
     fi

     echo ""
     header "Installing shfmt"
     if ! command -v shfmt &>/dev/null; then
-        _execute_ "curl -sS https://webi.sh/shfmt | sh"
+        _execute_ -pv "curl -sS https://webi.sh/shfmt | sh"
     fi

     REPOS=(
@@ -112,7 +118,7 @@ _mainScript_() {
     echo ""
     header "Install virtual environment with poetry"
     if command -v poetry &>/dev/null; then
-        pushd "/workspaces/obsidian-metadata" &>/dev/null
+        pushd "${WORKSPACE_DIR}" &>/dev/null
         _execute_ -pv "poetry install"
         venv_path="$(poetry env info --path)"
         echo "" >>"/home/vscode/.zshrc"
@@ -128,13 +134,13 @@ _mainScript_() {
     echo ""
     header "Initialize pre-commit"
     if command -v pre-commit &>/dev/null; then
-        if [ -d "/workspaces/obsidian-metadata/.git" ]; then
-            pushd "/workspaces/obsidian-metadata" &>/dev/null
+        if [ -d "${WORKSPACE_DIR}/.git" ]; then
+            pushd "${WORKSPACE_DIR}" &>/dev/null
             _execute_ -pv "pre-commit install --install-hooks"
             _execute_ -pv "pre-commit autoupdate"
             popd &>/dev/null
         else
-            warning "Git repository not found in /workspaces/obsidian-metadata. Initialize pre-commit manually."
+            warning "Git repository not found in ${WORKSPACE_DIR}. Initialize pre-commit manually."
         fi
     else
         warning "pre-commit is not installed"
@@ -154,7 +160,7 @@ DRYRUN=false
declare -a ARGS=()

# Script specific
-
+WORKSPACE_DIR="/workspaces/obsidian-metadata"
# ################################## Custom utility functions (Pasted from repository)
_execute_() {
    # DESC:
.github/workflows/automated-tests.yml (renamed from python-code-checker.yml)

@@ -1,20 +1,25 @@
 ---
-name: "Python Code Checker"
+name: "Automated Tests"

 on:
     workflow_dispatch:
     push:
         paths:
-            - ".github/workflows/python-code-checker.yml"
+            - ".github/workflows/automated-tests.yml"
             - ".github/actions/**"
             - "src/**"
             - "tests/**"
             - "pyproject.toml"
             - "poetry.lock"
         branches:
             - main
     pull_request:
-        types: [opened, reopened]
+        types:
+            - opened
+            - reopened
+            - synchronize
         paths:
-            - ".github/workflows/python-code-checker.yml"
+            - ".github/workflows/automated-tests.yml"
             - ".github/actions/**"
             - "src/**"
             - "tests/**"
@@ -33,7 +38,8 @@ jobs:
         matrix:
             python-version: ["3.10", "3.11"]
     steps:
-        - uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              disable-sudo: true
@@ -50,7 +56,7 @@ jobs:
              uploader.codecov.io:443

        - name: Checkout repository
-          uses: actions/checkout@v3
+          uses: actions/checkout@v4

        - name: Setup Python and Poetry
          uses: ./.github/actions/setup-poetry
@@ -62,15 +68,9 @@ jobs:
        - name: Lint with Mypy
          run: poetry run mypy src/
        - name: lint with ruff
-          run: poetry run ruff --extend-ignore=I001,D301 src/
+          run: poetry run ruff --extend-ignore=I001,D301,D401 src/
        - name: check pyproject.toml
          run: poetry run poetry check
-        - name: lint with black
-          run: poetry run black --check src/
-        - name: run vulture
-          run: poetry run vulture src/
-        - name: run interrogate
-          run: poetry run interrogate -c pyproject.toml .

        # ----------------------------------------------
        # run test suite
@@ -80,6 +80,13 @@ jobs:
              poetry run coverage run
              poetry run coverage report
              poetry run coverage xml

+       # ----------------------------------------------
+       # confirm package builds
+       # ----------------------------------------------
+       - name: Build package
+         run: poetry build
+
        # ----------------------------------------------
        # upload coverage stats
        # ----------------------------------------------
.github/workflows/commit-linter.yml (13 lines changed, vendored)
@@ -2,11 +2,14 @@
 name: Commit Linter

 on:
-    pull_request:
-        types: [opened, reopened]
     push:
         branches:
             - main
+    pull_request:
+        types:
+            - opened
+            - reopened
+            - synchronize

 permissions: # added using https://github.com/step-security/secure-workflows
     contents: read
@@ -19,8 +22,8 @@ jobs:
     pull-requests: read # for wagoid/commitlint-github-action to get commits in PR
     runs-on: ubuntu-latest
     steps:
-        - name: Harden Runner
-          uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              allowed-endpoints: >
@@ -28,7 +31,7 @@ jobs:
              github.com:443

        - name: Checkout repository
-          uses: actions/checkout@v3
+          uses: actions/checkout@v4
          with:
              fetch-depth: 0
.github/workflows/create-release.yml (35 lines changed, vendored)
@@ -22,7 +22,8 @@ jobs:
     matrix:
         python-version: ["3.11"]
     steps:
-        - uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              disable-sudo: true
@@ -36,7 +37,7 @@ jobs:
              uploads.github.com:443

        - name: Checkout repository
-          uses: actions/checkout@v3
+          uses: actions/checkout@v4
          with:
              fetch-depth: 0
@@ -58,22 +59,35 @@ jobs:
              echo $TAG
              echo $PROJECT_VERSION
              if [[ "$TAG" != "v$PROJECT_VERSION" ]]; then exit 1; fi
-             echo "current_tag=refs/tags/${TAG}" >> $GITHUB_ENV

-       # ----------------------------------------------
-       # Generate release notes
-       # ----------------------------------------------
-       - name: Release Notes
-         run: git log $(git describe HEAD~ --tags --abbrev=0)..HEAD --pretty='format:* %h %s' --no-merges >> ".github/RELEASE-TEMPLATE.md"
+       # ----------------------------------------------
+       # Test and then build the package
+       # ----------------------------------------------
+       - name: run poetry build
+         run: |
+             poetry run poetry check
+             poetry run coverage run
+             poetry build
+
+       # ----------------------------------------------
+       # Generate release notes
+       # ----------------------------------------------
+       # - name: Release Notes
+       #   run: git log $(git describe HEAD~ --tags --abbrev=0)..HEAD --pretty='format:* %h %s' --no-merges >> ".github/RELEASE-TEMPLATE.md"
+
+       - name: Export tag name to env variable
+         run: |
+             TAG=$(git describe HEAD --tags --abbrev=0)
+             echo "CURRENT_TAG=refs/tags/${TAG}" >> $GITHUB_ENV
+
+       - name: Get notes
+         id: generate_notes
+         uses: anmarkoulis/commitizen-changelog-reader@master
+         with:
+             tag_name: ${{ env.CURRENT_TAG }}
+             changelog: CHANGELOG.md

        # ----------------------------------------------
        # Build draft release (Note: Will need to manually publish)
@@ -82,7 +96,8 @@ jobs:
        - name: Create Release Draft
          uses: softprops/action-gh-release@v1
          with:
-             body_path: ".github/RELEASE-TEMPLATE.md"
+             # body_path: ".github/RELEASE-TEMPLATE.md"
+             body: ${{join(fromJson(steps.generate_notes.outputs.notes).notes, '')}}
             draft: true
             files: |
                 dist/*-${{env.PROJECT_VERSION}}-py3-none-any.whl
.github/workflows/devcontainer-checker.yml (23 lines changed, vendored)
@@ -3,12 +3,17 @@ name: "Dev Container Checker"

 on:
     workflow_dispatch:
-    pull_request:
-        types: [opened, reopened]
-    push:
-        paths:
-            - ".devcontainer/**"
-            - ".github/workflows/devcontainer-checker.yml"
+    push:
+        branches:
+            - main
+    pull_request:
+        types:
+            - opened
+            - reopened
+            - synchronize
+        paths:
+            - ".devcontainer/**"
+            - ".github/workflows/devcontainer-checker.yml"
@@ -22,17 +27,17 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-        - uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              allowed-endpoints: >
+                 *.data.mcr.microsoft.com:443
                  api.snapcraft.io:443
                  auth.docker.io:443
-                 centralus.data.mcr.microsoft.com:443
                  deb.debian.org:443
                  deb.debian.org:80
                  dl.yarnpkg.com:443
-                 eastus.data.mcr.microsoft.com:443
                  files.pythonhosted.org:443
                  ghcr.io:443
                  git.rootprojects.org:443
@@ -46,14 +51,12 @@ jobs:
                  registry-1.docker.io:443
                  registry.npmjs.org:443
                  webi.sh:443
-                 westcentralus.data.mcr.microsoft.com:443
-                 westus.data.mcr.microsoft.com:443

        - name: Checkout
-          uses: actions/checkout@v3
+          uses: actions/checkout@v4

        - name: Build and run dev container task
-          uses: devcontainers/ci@v0.2
+          uses: devcontainers/ci@v0.3
          with:
              runCmd: |
                  poe lint
.github/workflows/labeler.yml (6 lines changed, vendored)
@@ -10,14 +10,14 @@ jobs:
     pull-requests: write
     runs-on: ubuntu-latest
     steps:
-        - name: Harden Runner
-          uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              allowed-endpoints: >
                  api.github.com:443
                  github.com:443

-        - uses: actions/labeler@v4
+        - uses: actions/labeler@v5
          with:
              repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/pr-linter.yml (4 lines changed, vendored)
@@ -21,8 +21,8 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-        - name: Harden Runner
-          uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              allowed-endpoints: >
.github/workflows/pypi-release.yml (5 lines changed, vendored)
@@ -18,7 +18,8 @@ jobs:
     matrix:
         python-version: ["3.11"]
     steps:
-        - uses: step-security/harden-runner@18bf8ad2ca49c14cbb28b91346d626ccfb00c518 # v2.1.0
+        - name: Harden Security Runner
+          uses: step-security/harden-runner@v2
          with:
              egress-policy: block
              disable-sudo: true
@@ -32,7 +33,7 @@ jobs:
              upload.pypi.org:443

        - name: Checkout repository
-          uses: actions/checkout@v3
+          uses: actions/checkout@v4

        - name: Setup Python and Poetry
          uses: ./.github/actions/setup-poetry
.pre-commit-config.yaml

@@ -5,7 +5,7 @@ default_stages: [commit, manual]
 fail_fast: true
 repos:
     - repo: "https://github.com/commitizen-tools/commitizen"
-      rev: v2.40.0
+      rev: v3.13.0
      hooks:
          - id: commitizen
          - id: commitizen-branch
@@ -26,7 +26,7 @@ repos:
          - id: text-unicode-replacement-char

    - repo: "https://github.com/pre-commit/pre-commit-hooks"
-      rev: v4.4.0
+      rev: v4.5.0
      hooks:
          - id: check-added-large-files
          - id: check-ast
@@ -54,29 +54,39 @@ repos:
            types: [python]

    - repo: "https://github.com/adrienverge/yamllint.git"
-      rev: v1.29.0
+      rev: v1.33.0
      hooks:
          - id: yamllint
            files: ^.*\.(yaml|yml)$
            entry: yamllint --strict --config-file .yamllint.yml

    - repo: "https://github.com/charliermarsh/ruff-pre-commit"
-      rev: "v0.0.230"
+      rev: "v0.1.13"
      hooks:
          - id: ruff
-            args: ["--extend-ignore", "I001,D301,D401,PLR2004"]
+            args: ["--extend-ignore", "I001,D301,D401"]
            exclude: tests/

    - repo: "https://github.com/jendrikseipp/vulture"
-      rev: "v2.7"
+      rev: "v2.10"
      hooks:
          - id: vulture

+   - repo: "https://github.com/crate-ci/typos"
+     rev: "v1.17.1"
+     hooks:
+         - id: typos
+
    - repo: local
      hooks:
-         - id: custom
-           name: custom pre-commit script
-           entry: scripts/pre-commit-hook.sh
+         # This calls a custom pre-commit script.
+         # Disable if you don't have it.
+         - id: stopwords
+           name: stopwords
+           entry: bash -c '~/bin/git-stopwords ${PWD}/"$@"'
+           language: system
+           pass_filenames: true
+           types: [text]

          - id: black
            name: black
.typos.toml (new file, 7 lines)
@@ -0,0 +1,7 @@
[default]
default.locale = "en_us"

[default.extend-words]
nd = "nd" # In the context of 2nd
[files]
extend-exclude = ["*_cache", ".venv", "src/jdfile/utils/strings.py", "tests/fixtures/"]
CHANGELOG.md (156 lines changed)
@@ -1,22 +1,168 @@
+## v0.12.1 (2023-09-02)
+
+### Fix
+
+- **notes**: preserve file encoding when writing to filesystem (#59)
+
+## v0.12.0 (2023-05-17)
+
+### Feat
+
+- greatly improve capturing all formats of inline metadata (#41)
+- greatly improve capturing metadata all formats of inline metadata
+
+### Fix
+
+- allow markdown inline code in metadata values
+- only ask for valid metadata types when adding new metadata
+- convert charsets to utf-8 when necessary (#32)
+- improve TOML error handling and docs for Windows paths (#31)
+
+## v0.11.1 (2023-03-29)
+
+### Fix
+
+- add custom exceptions (#29)
+
+## v0.11.0 (2023-03-24)
+
+### Feat
+
+- add `--import-csv` option to cli
+
+## v0.10.0 (2023-03-21)
+
+### Feat
+
+- add `--export-template` cli option
+
+### Fix
+
+- `--export-template` correctly exports all notes
+- `--export-csv` exports csv not json
+- **csv-import**: fail if `type` does not validate
+
+### Refactor
+
+- pave the way for non-regex key/value deletions
+- remove unused code
+- cleanup rename and delete from dict functions
+
+## v0.9.0 (2023-03-20)
+
+### Feat
+
+- bulk update metadata from a CSV file
+
+### Fix
+
+- find more instances of inline metadata
+- ensure frontmatter values are unique within a key
+- improve validation of bulk imports
+- improve logging to screen
+
+## v0.8.0 (2023-03-12)
+
+### Feat
+
+- move inline metadata to specific location in note (#27)
+
+### Fix
+
+- add `back` option to transpose menus
+
+## v0.7.0 (2023-03-11)
+
+### Feat
+
+- transpose metadata between frontmatter and inline
+- select insert location for new inline metadata
+
+### Fix
+
+- exit after committing changes
+- fix typo and sort order of options
+
+## v0.6.1 (2023-03-03)
+
+### Fix
+
+- improve error handling when frontmatter malformed
+
+### Refactor
+
+- use single console instance
+
+## v0.6.0 (2023-02-06)
+
+### Feat
+
+- transpose metadata (#18)
+
+### Fix
+
+- **ui**: add separator to top of select lists
+- allow adding inline tags with same key different values (#17)
+- remove unnecessary question when viewing diffs
+
+## v0.5.0 (2023-02-04)
+
+### Feat
+
+- add new tags (#16)
+- add new inline metadata (#15)
+- **configuration**: `insert_location` specifies where content is added within notes
+
+### Fix
+
+- find more emojis
+
+## v0.4.0 (2023-02-02)
+
+### Feat
+
+- export metadata (#14)
+    - export metadata to CSV
+    - export metadata to JSON
+    - export CSV or JSON from command line
+- limit scope of notes with one or more filters (#13)
+
+### Fix
+
+- do not count in-page links as tags
+- improve terminal colors of questions
+
+## v0.3.0 (2023-01-30)
+
+### Feat
+
+- **application**: add new metadata to frontmatter (#9)
+
+### Fix
+
+- **application**: improve ux (#10)
+
 ## v0.2.0 (2023-01-25)

 ### Feat

 - **configuration**: support multiple vaults in the configuration file (#6)

 ### Refactor

 - **application**: refactor questions to separate class (#7)

 ## v0.1.1 (2023-01-23)

 ### Fix

 - **notes**: diff now prints values in the form `[value]`
 - **application**: exit after committing changes

 ## v0.1.0 (2023-01-22)

 ### Feat

 - initial application release
README.md (201 lines changed)
@@ -1,39 +1,147 @@
-[](https://github.com/natelandau/obsidian-metadata/actions/workflows/python-code-checker.yml) [](https://codecov.io/gh/natelandau/obsidian-metadata)
+[](https://badge.fury.io/py/obsidian-metadata)  [](https://github.com/natelandau/obsidian-metadata/actions/workflows/automated-tests.yml) [](https://codecov.io/gh/natelandau/obsidian-metadata)

 # obsidian-metadata

-A script to make batch updates to metadata in an Obsidian vault. Provides the following capabilities:
-
-- `in-text tag`: delete every occurrence
-- `in-text tags`: Rename tag (`#tag1` -> `#tag2`)
-- `frontmatter`: Delete a key matching a regex pattern and all associated values
-- `frontmatter`: Rename a key
-- `frontmatter`: Delete a value matching a regex pattern from a specified key
-- `frontmatter`: Rename a value from a specified key
-- `inline metadata`: Delete a key matching a regex pattern and all associated values
-- `inline metadata`: Rename a key
-- `inline metadata`: Delete a value matching a regex pattern from a specified key
-- `inline metadata`: Rename a value from a specified key
-- `vault`: Create a backup of the Obsidian vault
+A script to make batch updates to metadata in an Obsidian vault. No changes are made to the Vault until they are explicitly committed.
+
+[](https://asciinema.org/a/DQk0ufza1azwU3QFkE6XV33nm)
+
+## Important Disclaimer
+
+**It is strongly recommended that you back up your vault prior to committing changes.** This script makes changes directly to the markdown files in your vault. Once the changes are committed, there is no ability to recreate the original information unless you have a backup. Follow the instructions in the script to create a backup of your vault if needed. The author of this script is not responsible for any data loss that may occur. Use at your own risk.

 ## Install

-Requires Python v3.10 or above.
+`obsidian-metadata` requires Python v3.10 or above.

 ```bash
 pip install obsidian-metadata
 ```

-## Important Disclaimer
-**It is strongly recommended that you back up your vault prior to committing changes.** This script makes changes directly to the markdown files in your vault. Once the changes are committed, there is no ability to recreate the original information unless you have a backup. Follow the instructions in the script to create a backup of your vault if needed. The author of this script is not responsible for any data loss that may occur. Use at your own risk.
-
 ## Usage

 The script provides a menu of available actions. Make as many changes as you require and review them as you go. No changes are made to the Vault until they are explicitly committed.

-[](https://asciinema.org/a/553464)
+### CLI Commands
+
+- `--config-file`: Specify a custom configuration file location
+- `--dry-run`: Make no destructive changes
+- `--import-csv`: Import a CSV file with bulk updates
+- `--export-csv`: Specify a path and create a CSV export of all metadata
+- `--export-json`: Specify a path and create a JSON export of all metadata
+- `--export-template`: Specify a path and export all notes with their associated metadata to a CSV file for use as a bulk import template
+- `--help`: Shows interactive help and exits
+- `--log-file`: Specify a log file location
+- `--log-to-file`: Will log to a file
+- `--vault-path`: Specify a path to an Obsidian Vault
+- `--verbose`: Set verbosity level (0=WARN, 1=INFO, 2=DEBUG, 3=TRACE)
+- `--version`: Prints the version number and exits
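Since the flags above combine freely, a quick sketch may help. The following hypothetical invocation (both paths are placeholders, not taken from this repository) previews a CSV export of one vault without writing any changes:

```bash
# A minimal sketch combining the documented flags above.
# Both paths are hypothetical examples.
obsidian-metadata \
    --vault-path ~/Documents/MyVault \
    --export-csv ~/exports/vault-metadata.csv \
    --dry-run
```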
+### Running the script
+
+Once installed, run `obsidian-metadata` in your terminal to enter an interactive menu of sub-commands.
+
+**Vault Actions**
+
+- Backup: Create a backup of the vault.
+- Delete Backup: Delete a backup of the vault.
+
+**Export Metadata**
+
+- Export all metadata to a CSV organized by metadata type
+- Export all metadata to a CSV organized by note path
+- Export all metadata to a JSON file organized by metadata type
+
+**Inspect Metadata**
+
+- **View all metadata in the vault**
+- View all **frontmatter**
+- View all **inline metadata**
+- View all **inline tags**
+
+**Filter Notes in Scope**: Limit the scope of notes to be processed with one or more filters.
+
+- **Path filter (regex)**: Limit scope based on the path or filename
+- **Metadata filter**: Limit scope based on a key or key/value pair
+- **Tag filter**: Limit scope based on an in-text tag
+- **List and clear filters**: List all current filters and clear one or all
+- **List notes in scope**: List notes that will be processed.
+
+**Bulk Edit Metadata** from a CSV file (See the _[Make Bulk Updates](https://github.com/natelandau/obsidian-metadata#make-bulk-updates)_ section below)
+
+**Add Metadata**: Add new metadata to your vault.
+
+When adding a new key to inline metadata, the `insert location` value in the config file will specify where in the note it will be inserted.
+
+- **Add new metadata to the frontmatter**
+- **Add new inline metadata** - Set `insert_location` in the config to control where the new metadata is inserted. (Default: Bottom)
+- **Add new inline tag** - Set `insert_location` in the config to control where the new tag is inserted. (Default: Bottom)
+
+**Rename Metadata**: Rename either a key and all associated values, a specific value within a key, or an in-text tag.
+
+- **Rename a key**
+- **Rename a value**
+- **Rename an inline tag**
+
+**Delete Metadata**: Delete either a key and all associated values, or a specific value.
+
+- **Delete a key and associated values**
+- **Delete a value from a key**
+- **Delete an inline tag**
+
+**Move Inline Metadata**: Move inline metadata to a specified location within a note
+
+- **Move to Top**: Move all inline metadata beneath the frontmatter
+- **Move to After Title**: Move all inline metadata beneath the first markdown header
+- **Move to Bottom**: Move all inline metadata to the bottom of the note
+
+**Transpose Metadata**: Move metadata from inline to frontmatter or the reverse.
+
+When transposing to inline metadata, the `insert location` value in the config file will specify where in the note it will be inserted.
+
+- **Transpose all metadata** - Moves all frontmatter to inline metadata, or the reverse
+- **Transpose key** - Transposes a specific key and all its values
+- **Transpose value** - Transpose a specific key:value pair
+
+**Review Changes**: Prior to committing changes, review all changes that will be made.
+
+- **View a diff of the changes** that will be made
+
+**Commit Changes**: Write the changes to disk. This step is not undoable.
+
+- **Commit changes to the vault**
+
+### Known Limitations
+
+Multi-level frontmatter is not supported.
+
+```yaml
+# This works perfectly well
+---
+key: "value"
+key2:
+    - one
+    - two
+    - three
+key3: ["foo", "bar", "baz"]
+key4: value
+
+# This will not work
+---
+key1:
+    key2:
+        - one
+        - two
+        - three
+key3:
+    - one
+    - two
+    - three
+---
+```
+
 ### Configuration

 `obsidian-metadata` requires a configuration file at `~/.obsidian_metadata.toml`. On first run, this file will be created. You can specify a new location for the configuration file with the `--config-file` option.

 To add additional vaults, copy the default section and add the appropriate information. The script will prompt you to select a vault if multiple exist in the configuration file

@@ -43,18 +151,65 @@ Below is an example with two vaults.
 ["Vault One"] # Name of the vault.

     # Path to your obsidian vault
-    # Note for Windows users: Windows paths must use `\\` as the path separator due to a limitation with how TOML parses strings.
-    # Example: "C:\\Users\\username\\Documents\\Obsidian"
     path = "/path/to/vault"

     # Folders within the vault to ignore when indexing metadata
     exclude_paths = [".git", ".obsidian"]

+    # Location to add metadata. One of:
+    #    TOP: Directly after frontmatter.
+    #    AFTER_TITLE: After the first header following frontmatter.
+    #    BOTTOM: The bottom of the note
+    insert_location = "BOTTOM"

 ["Vault Two"]
     path = "/path/to/second_vault"
-    exclude_paths = [".git", ".obsidian"]
+    exclude_paths = [".git", ".obsidian", "daily_notes"]
+    insert_location = "AFTER_TITLE"
 ```

 To bypass the configuration file and specify a vault to use at runtime use the `--vault-path` option.

+**Note for Windows users:**
+Due to how TOML parses strings, Windows paths must use `\\` as the path separator.
+For example: `C:\\Users\\username\\Documents\\Obsidian`
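As a concrete sketch of the `--vault-path` override described above (the vault location is a placeholder):

```bash
# Run against a vault that is not listed in ~/.obsidian_metadata.toml.
# The path is a hypothetical example; --dry-run keeps the run non-destructive.
obsidian-metadata --vault-path "/path/to/vault" --dry-run
```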
+### Make Bulk Updates
+
+Bulk edits are supported by importing a CSV file containing the following columns. Column headers must be lowercase.
+
+1. `path` - Path to note relative to the vault root folder
+2. `type` - Type of metadata. One of `frontmatter`, `inline_metadata`, or `tag`
+3. `key` - The key to add (leave blank for a tag)
+4. `value` - The value to add to the key
+
+An example valid CSV file is
+
+```csv
+path,type,key,value
+folder 1/note1.md,frontmatter,fruits,apple
+folder 1/note1.md,frontmatter,fruits,banana
+folder 1/note1.md,inline_metadata,cars,toyota
+folder 1/note1.md,inline_metadata,cars,honda
+folder 1/note1.md,tag,,tag1
+folder 1/note1.md,tag,,tag2
+```
+
+How bulk imports work:
+
+- **Only notes which match the path in the CSV file are updated**
+- **Affected notes will have ALL of their metadata changed** to reflect the values in the CSV file
+- **Existing metadata in a matching note will be rewritten**. This may result in its location and/or formatting within the note being changed
+- Inline tags ignore any value added to the `key` column
+
+Create a CSV template for making bulk updates containing all your notes and their associated metadata by
+
+1. Using the `--export-template` cli command; or
+2. Selecting the `Metadata by note` option within the `Export Metadata` section of the app
+
+Once you have a template created you can import it using the `--import-csv` flag or by navigating to the `Import bulk changes from CSV` option, as sketched below.
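A hypothetical end-to-end pass over the export/edit/import cycle described above might look like this (file locations are placeholders):

```bash
# 1. Export a CSV template listing every note and its metadata.
obsidian-metadata --export-template ~/bulk-updates.csv

# 2. Edit ~/bulk-updates.csv by hand or in a spreadsheet.

# 3. Import the edited CSV; the script asks for review before committing.
obsidian-metadata --import-csv ~/bulk-updates.csv
```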
+# Contributing

@@ -62,7 +217,7 @@ To bypass the configuration file and specify a vault to use at runtime use the `
 There are two ways to contribute to this project.

-### 1. Containerized development (Recommended)
+### 1. Containerized development

 1. Clone this repository. `git clone https://github.com/natelandau/obsidian-metadata`
 2. Open the repository in Visual Studio Code
@@ -4,8 +4,11 @@ coverage:
     project:
         default:
             target: 50% # the required coverage value
-            threshold: 1% # the leniency in hitting the target
+            threshold: 5% # the leniency in hitting the target
+    patch:
+        default:
+            target: 50%
+            threshold: 5%
 ignore:
     - tests/
poetry.lock (1684 lines changed, generated)

File diff suppressed because it is too large.
poetry.toml

@@ -1,2 +1,2 @@
 [virtualenvs]
-in-project = true
+in-project = true
pyproject.toml (234 lines changed)
@@ -11,98 +11,56 @@
 name = "obsidian-metadata"
 readme = "README.md"
 repository = "https://github.com/natelandau/obsidian-metadata"
-version = "0.2.0"
+version = "0.12.1"

 [tool.poetry.scripts] # https://python-poetry.org/docs/pyproject/#scripts
 obsidian-metadata = "obsidian_metadata.cli:app"

 [tool.poetry.dependencies]
-loguru = "^0.6.0"
-python = "^3.10"
-questionary = "^1.10.0"
-rich = "^13.2.0"
-ruamel-yaml = "^0.17.21"
-shellingham = "^1.4.0"
-tomlkit = "^0.11.6"
-typer = "^0.7.0"
+charset-normalizer = "^3.2.0"
+emoji = "^2.8.0"
+loguru = "^0.7.0"
+python = "^3.10"
+questionary = "^1.10.0"
+regex = "^2023.8.8"
+rich = "^13.5.2"
+ruamel-yaml = "^0.17.32"
+shellingham = "^1.5.3"
+tomlkit = "^0.12.1"
+typer = "^0.9.0"

 [tool.poetry.group.test.dependencies]
-pytest = "^7.2.0"
+pytest = "^7.4.0"
 pytest-clarity = "^1.0.1"
-pytest-mock = "^3.10.0"
+pytest-mock = "^3.11.1"
 pytest-pretty-terminal = "^1.1.0"
-pytest-xdist = "^3.1.0"
+pytest-xdist = "^3.3.1"

 [tool.poetry.group.dev.dependencies]
 absolufy-imports = "^0.3.1"
-black = "^22.12.0"
-commitizen = "^2.39.1"
-coverage = "^7.0.4"
+black = "^23.7.0"
+commitizen = "^3.7.0"
+coverage = "^7.3.0"
 interrogate = "^1.5.0"
-mypy = "^0.991"
-pdoc = "^12.3.1"
-pep8-naming = "^0.13.3"
-poethepoet = "^0.18.0"
-pre-commit = "^2.21.0"
-ruff = "^0.0.217"
-typeguard = "^2.13.3"
-types-python-dateutil = "^2.8.19.5"
-types-pyyaml = "^6.0.12.2"
-vulture = "^2.7"
+mypy = "^1.8.0"
+pdoc = "^14.0.0"
+poethepoet = "^0.22.0"
+pre-commit = "^3.3.3"
+ruff = "^0.1.13"
+sh = "^2.0.6"
+types-python-dateutil = "^2.8.19.14"
+typos = "^1.17.1"
+vulture = "^2.9.1"

-[tool.ruff] # https://github.com/charliermarsh/ruff
-fix = true
-ignore = [
-    "B006",
-    "B008",
-    "D107",
-    "D203",
-    "D204",
-    "D213",
-    "D215",
-    "D400",
-    "D404",
-    "D406",
-    "D407",
-    "D408",
-    "D409",
-    "D413",
-    "E501",
-    "N805",
-    "PGH001",
-    "PGH003",
-    "UP007",
-]
-ignore-init-module-imports = true
-select = [
-    "A",
-    "B",
-    "BLE",
-    "C4",
-    "C90",
-    "D",
-    "E",
-    "ERA",
-    "F",
-    "I",
-    "N",
-    "PGH",
-    "PLC",
-    "PLE",
-    "PLR",
-    "PLW",
-    "RET",
-    "RUF",
-    "SIM",
-    "TID",
-    "UP",
-    "W",
-    "YTT",
-]
-src = ["src", "tests"]
-target-version = "py310"
-unfixable = ["ERA001", "F401", "F401", "UP007"]
+[tool.black]
+line-length = 100

+[tool.commitizen]
+bump_message = "bump(release): v$current_version → v$new_version"
+changelog_incremental = true
+tag_format = "v$version"
+update_changelog_on_bump = true
+version = "0.12.1"
+version_files = ["pyproject.toml:version", "src/obsidian_metadata/__version__.py:__version__"]

 [tool.coverage.report] # https://coverage.readthedocs.io/en/latest/config.html#report
 exclude_lines = [
@@ -135,19 +93,6 @@
 [tool.coverage.xml]
 output = "reports/coverage.xml"

-[tool.black]
-line-length = 100
-
-[tool.commitizen]
-bump_message = "bump(release): v$current_version → v$new_version"
-tag_format = "v$version"
-update_changelog_on_bump = true
-version = "0.2.0"
-version_files = [
-    "pyproject.toml:version",
-    "src/obsidian_metadata/__version__.py:__version__",
-]

 [tool.interrogate]
 exclude = ["build", "docs", "tests"]
 fail-under = 90
@@ -181,6 +126,108 @@
 testpaths = ["src", "tests"]
 xfail_strict = true

+[tool.ruff] # https://github.com/charliermarsh/ruff
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".hg",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".pytype",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "venv",
+]
+# Avoiding flagging (and removing) `V101` from any `# noqa`
+# directives, despite Ruff's lack of support for `vulture`.
+external = ["V101"]
+fix = true
+ignore = [
+    "B006",
+    "B008",
+    "D107",
+    "D203",
+    "D204",
+    "D213",
+    "D215",
+    "D404",
+    "D406",
+    "D407",
+    "D408",
+    "D409",
+    "D413",
+    "E501",
+    "N805",
+    "PGH001",
+    "PGH003",
+    "UP007",
+]
+ignore-init-module-imports = true
+line-length = 100
+per-file-ignores = { "cli.py" = [
+    "PLR0912",
+    "PLR0913",
+], "tests/*.py" = [
+    "PLR0913",
+    "PLR2004",
+    "S101",
+] }
+select = [
+    "A", # flake8-builtins
+    "ARG", # flake8-unused-arguments
+    "B", # flake8-bugbear
+    "BLE", # flake8-blind-exception
+    "C40", # flake8-comprehensions
+    "C90", # McCabe
+    "D", # pydocstyle
+    "E", # pycodestyle Errors
+    "ERA", # flake8-eradicate
+    "EXE", # flake8-executable
+    "F", # pyflakes
+    "I", # iSort
+    "N", # Pep8-naming
+    "PGH", # pygrep-hooks
+    "PLC", # pylint Convention
+    "PLE", # pylint Error
+    "PLR", # pylint Refactor
+    "PLW", # pylint Warning
+    "PT", # flake8-pytest-style
+    "PTH", # flake8-use-pathlib
+    "Q", # flake8-quotes
+    "RET", # flake8-return
+    "RUF", # Ruff-specific rules
+    "S", # flake8-bandit
+    "SIM", # flake8-simplify
+    "TID", # flake8-tidy-imports
+    "UP", # pyupgrade
+    "W", # pycodestyle Warnings
+    "YTT", # flake8-2020
+]
+src = ["src", "tests"]
+target-version = "py310"
+unfixable = ["ERA001", "F401", "F841", "UP007"]
+
+[tool.ruff.mccabe]
+# Unlike Flake8, default to a complexity level of 10.
+max-complexity = 10
+
+[tool.ruff.pydocstyle]
+convention = "google"
+
+[tool.ruff.pylint]
+max-args = 6

 [tool.vulture] # https://pypi.org/project/vulture/
 # exclude = ["file*.py", "dir/"]
 # ignore_decorators = ["@app.route", "@require_*"]
@@ -206,7 +253,7 @@
 help = "Lint this package"

 [[tool.poe.tasks.lint.sequence]]
-shell = "ruff --extend-ignore=I001,D301 src/ tests/"
+shell = "ruff src/ --no-fix"

 [[tool.poe.tasks.lint.sequence]]
 shell = "black --check src/ tests/"
@@ -223,6 +270,9 @@
 [[tool.poe.tasks.lint.sequence]]
 shell = "yamllint ."

+[[tool.poe.tasks.lint.sequence]]
+shell = "typos"

 [[tool.poe.tasks.lint.sequence]]
 shell = "interrogate -c pyproject.toml ."
@@ -1,821 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2317
|
||||
|
||||
_mainScript_() {
|
||||
|
||||
_customStopWords_() {
|
||||
# DESC: Check if any specified stop words are in the commit diff. If found, the pre-commit hook will exit with a non-zero exit code.
|
||||
# ARGS:
|
||||
# $1 (Required): Path to file
|
||||
# OUTS:
|
||||
# 0: Success
|
||||
# 1: Failure
|
||||
# USAGE:
|
||||
# _customStopWords_ "/path/to/file.sh"
|
||||
# NOTE:
|
||||
# Requires a plaintext stopword file located at
|
||||
# `~/.git_stop_words` containing one stopword per line.
|
||||
|
||||
[[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
|
||||
|
||||
local _gitDiffTmp
|
||||
local FILE_TO_CHECK="${1}"
|
||||
|
||||
_gitDiffTmp="${TMP_DIR}/${RANDOM}.${RANDOM}.${RANDOM}.diff.txt"
|
||||
|
||||
if [ -f "${STOP_WORD_FILE}" ]; then
|
||||
|
||||
if [[ $(basename "${STOP_WORD_FILE}") == "$(basename "${FILE_TO_CHECK}")" ]]; then
|
||||
debug "$(basename "${1}"): Don't check stop words file for stop words."
|
||||
return 0
|
||||
fi
|
||||
debug "$(basename "${FILE_TO_CHECK}"): Checking for stop words..."
|
||||
|
||||
# remove blank lines from stopwords file
|
||||
sed '/^$/d' "${STOP_WORD_FILE}" >"${TMP_DIR}/pattern_file.txt"
|
||||
|
||||
# Check for stopwords
|
||||
if git diff --cached -- "${FILE_TO_CHECK}" | grep –i -q "new file mode"; then
|
||||
if grep -i --file="${TMP_DIR}/pattern_file.txt" "${FILE_TO_CHECK}"; then
|
||||
return 1
|
||||
else
|
||||
return 0
|
||||
fi
|
||||
else
|
||||
# Add diff to a temporary file
|
||||
git diff --cached -- "${FILE_TO_CHECK}" | grep '^+' >"${_gitDiffTmp}"
|
||||
if grep -i --file="${TMP_DIR}/pattern_file.txt" "${_gitDiffTmp}"; then
|
||||
return 1
|
||||
else
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
else
|
||||
|
||||
notice "Could not find git stopwords file expected at '${STOP_WORD_FILE}'. Continuing..."
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
# Don;t lint binary files
|
||||
if [[ ${ARGS[0]} =~ \.(jpg|jpeg|gif|png|exe|zip|gzip|tiff|tar|dmg|ttf|otf|m4a|mp3|mkv|mov|avi|eot|svg|woff2?|aac|wav|flac|pdf|doc|xls|ppt|7z|bin|dmg|dat|sql|ico|mpe?g)$ ]]; then
|
||||
_safeExit_ 0
|
||||
fi
|
||||
|
||||
if ! _customStopWords_ "${ARGS[0]}"; then
|
||||
error "Stop words found in ${ARGS[0]}"
|
||||
_safeExit_ 1
|
||||
fi
|
||||
}
|
||||
# end _mainScript_
|
||||
|
||||
# ################################## Flags and defaults
|
||||
# Required variables
|
||||
LOGFILE="${HOME}/logs/$(basename "$0").log"
|
||||
QUIET=false
|
||||
LOGLEVEL=ERROR
|
||||
VERBOSE=false
|
||||
FORCE=false
|
||||
DRYRUN=false
|
||||
declare -a ARGS=()
|
||||
|
||||
# Script specific
|
||||
LOGLEVEL=NONE
|
||||
STOP_WORD_FILE="${HOME}/.git_stop_words"
|
||||
shopt -s nocasematch
|
||||
# ################################## Custom utility functions (Pasted from repository)
|
||||
|
||||
# ################################## Functions required for this template to work
|
||||
|
||||
_setColors_() {
|
||||
# DESC:
|
||||
# Sets colors use for alerts.
|
||||
# ARGS:
|
||||
# None
|
||||
# OUTS:
|
||||
# None
|
||||
# USAGE:
|
||||
# printf "%s\n" "${blue}Some text${reset}"
|
||||
|
||||
if tput setaf 1 >/dev/null 2>&1; then
|
||||
bold=$(tput bold)
|
||||
underline=$(tput smul)
|
||||
reverse=$(tput rev)
|
||||
reset=$(tput sgr0)
|
||||
|
||||
if [[ $(tput colors) -ge 256 ]] >/dev/null 2>&1; then
|
||||
white=$(tput setaf 231)
|
||||
blue=$(tput setaf 38)
|
||||
yellow=$(tput setaf 11)
|
||||
green=$(tput setaf 82)
|
||||
red=$(tput setaf 9)
|
||||
purple=$(tput setaf 171)
|
||||
gray=$(tput setaf 250)
|
||||
else
|
||||
white=$(tput setaf 7)
|
||||
blue=$(tput setaf 38)
|
||||
yellow=$(tput setaf 3)
|
||||
green=$(tput setaf 2)
|
||||
red=$(tput setaf 9)
|
||||
purple=$(tput setaf 13)
|
||||
gray=$(tput setaf 7)
|
||||
fi
|
||||
else
|
||||
bold="\033[4;37m"
|
||||
reset="\033[0m"
|
||||
underline="\033[4;37m"
|
||||
# shellcheck disable=SC2034
|
||||
reverse=""
|
||||
white="\033[0;37m"
|
||||
blue="\033[0;34m"
|
||||
yellow="\033[0;33m"
|
||||
green="\033[1;32m"
|
||||
red="\033[0;31m"
|
||||
purple="\033[0;35m"
|
||||
gray="\033[0;37m"
|
||||
fi
|
||||
}
|
||||
|
||||
_alert_() {
|
||||
# DESC:
|
||||
# Controls all printing of messages to log files and stdout.
|
||||
# ARGS:
|
||||
# $1 (required) - The type of alert to print
|
||||
# (success, header, notice, dryrun, debug, warning, error,
|
||||
# fatal, info, input)
|
||||
# $2 (required) - The message to be printed to stdout and/or a log file
|
||||
# $3 (optional) - Pass '${LINENO}' to print the line number where the _alert_ was triggered
|
||||
# OUTS:
|
||||
# stdout: The message is printed to stdout
|
||||
# log file: The message is printed to a log file
|
||||
# USAGE:
|
||||
# [_alertType] "[MESSAGE]" "${LINENO}"
|
||||
# NOTES:
|
||||
# - The colors of each alert type are set in this function
|
||||
# - For specified alert types, the funcstac will be printed
|
||||
|
||||
local _color
|
||||
local _alertType="${1}"
|
||||
local _message="${2}"
|
||||
local _line="${3-}" # Optional line number
|
||||
|
||||
[[ $# -lt 2 ]] && fatal 'Missing required argument to _alert_'
|
||||
|
||||
if [[ -n ${_line} && ${_alertType} =~ ^fatal && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
|
||||
_message="${_message} ${gray}(line: ${_line}) $(_printFuncStack_)"
|
||||
elif [[ -n ${_line} && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
|
||||
_message="${_message} ${gray}(line: ${_line})"
|
||||
elif [[ -z ${_line} && ${_alertType} =~ ^fatal && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
|
||||
_message="${_message} ${gray}$(_printFuncStack_)"
|
||||
fi
|
||||
|
||||
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
|
||||
_color="${bold}${red}"
|
||||
elif [ "${_alertType}" == "info" ]; then
|
||||
_color="${gray}"
|
||||
elif [ "${_alertType}" == "warning" ]; then
|
||||
_color="${red}"
|
||||
elif [ "${_alertType}" == "success" ]; then
|
||||
_color="${green}"
|
||||
elif [ "${_alertType}" == "debug" ]; then
|
||||
_color="${purple}"
|
||||
elif [ "${_alertType}" == "header" ]; then
|
||||
_color="${bold}${white}${underline}"
|
||||
elif [ "${_alertType}" == "notice" ]; then
|
||||
_color="${bold}"
|
||||
elif [ "${_alertType}" == "input" ]; then
|
||||
_color="${bold}${underline}"
|
||||
elif [ "${_alertType}" = "dryrun" ]; then
|
||||
_color="${blue}"
|
||||
else
|
||||
_color=""
|
||||
fi
|
||||
|
||||
_writeToScreen_() {
|
||||
("${QUIET}") && return 0 # Print to console when script is not 'quiet'
|
||||
[[ ${VERBOSE} == false && ${_alertType} =~ ^(debug|verbose) ]] && return 0
|
||||
|
||||
if ! [[ -t 1 || -z ${TERM-} ]]; then # Don't use colors on non-recognized terminals
|
||||
_color=""
|
||||
reset=""
|
||||
fi
|
||||
|
||||
if [[ ${_alertType} == header ]]; then
|
||||
printf "${_color}%s${reset}\n" "${_message}"
|
||||
else
|
||||
printf "${_color}[%7s] %s${reset}\n" "${_alertType}" "${_message}"
|
||||
fi
|
||||
}
|
||||
_writeToScreen_
|
||||
|
||||
_writeToLog_() {
|
||||
[[ ${_alertType} == "input" ]] && return 0
|
||||
[[ ${LOGLEVEL} =~ (off|OFF|Off) ]] && return 0
|
||||
if [ -z "${LOGFILE-}" ]; then
|
||||
LOGFILE="$(pwd)/$(basename "$0").log"
|
||||
fi
|
||||
[ ! -d "$(dirname "${LOGFILE}")" ] && mkdir -p "$(dirname "${LOGFILE}")"
|
||||
[[ ! -f ${LOGFILE} ]] && touch "${LOGFILE}"
|
||||
|
||||
# Don't use colors in logs
|
||||
local _cleanmessage
|
||||
_cleanmessage="$(printf "%s" "${_message}" | sed -E 's/(\x1b)?\[(([0-9]{1,2})(;[0-9]{1,3}){0,2})?[mGK]//g')"
|
||||
# Print message to log file
|
||||
printf "%s [%7s] %s %s\n" "$(date +"%b %d %R:%S")" "${_alertType}" "[$(/bin/hostname)]" "${_cleanmessage}" >>"${LOGFILE}"
|
||||
}
|
||||
|
||||
# Write specified log level data to logfile
|
||||
case "${LOGLEVEL:-ERROR}" in
|
||||
ALL | all | All)
|
||||
_writeToLog_
|
||||
;;
|
||||
DEBUG | debug | Debug)
|
||||
_writeToLog_
|
||||
;;
|
||||
INFO | info | Info)
|
||||
if [[ ${_alertType} =~ ^(error|fatal|warning|info|notice|success) ]]; then
|
||||
_writeToLog_
|
||||
fi
|
||||
;;
|
||||
NOTICE | notice | Notice)
|
||||
if [[ ${_alertType} =~ ^(error|fatal|warning|notice|success) ]]; then
|
||||
_writeToLog_
|
||||
fi
|
||||
;;
|
||||
WARN | warn | Warn)
|
||||
if [[ ${_alertType} =~ ^(error|fatal|warning) ]]; then
|
||||
_writeToLog_
|
||||
fi
|
||||
;;
|
||||
ERROR | error | Error)
|
||||
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
|
||||
_writeToLog_
|
||||
fi
|
||||
;;
|
||||
FATAL | fatal | Fatal)
|
||||
if [[ ${_alertType} =~ ^fatal ]]; then
|
||||
_writeToLog_
|
||||
fi
|
||||
;;
|
||||
OFF | off)
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
|
||||
_writeToLog_
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
} # /_alert_
|
||||
|
||||
error() { _alert_ error "${1}" "${2-}"; }
|
||||
warning() { _alert_ warning "${1}" "${2-}"; }
|
||||
notice() { _alert_ notice "${1}" "${2-}"; }
|
||||
info() { _alert_ info "${1}" "${2-}"; }
|
||||
success() { _alert_ success "${1}" "${2-}"; }
|
||||
dryrun() { _alert_ dryrun "${1}" "${2-}"; }
|
||||
input() { _alert_ input "${1}" "${2-}"; }
|
||||
header() { _alert_ header "${1}" "${2-}"; }
|
||||
debug() { _alert_ debug "${1}" "${2-}"; }
|
||||
fatal() {
|
||||
_alert_ fatal "${1}" "${2-}"
|
||||
_safeExit_ "1"
|
||||
}
|
||||
|
||||
_printFuncStack_() {
|
||||
# DESC:
|
||||
# Prints the function stack in use. Used for debugging, and error reporting.
|
||||
# ARGS:
|
||||
# None
|
||||
# OUTS:
|
||||
# stdout: Prints [function]:[file]:[line]
|
||||
# NOTE:
|
||||
# Does not print functions from the alert class
|
||||
local _i
|
||||
declare -a _funcStackResponse=()
|
||||
for ((_i = 1; _i < ${#BASH_SOURCE[@]}; _i++)); do
|
||||
case "${FUNCNAME[${_i}]}" in
|
||||
_alert_ | _trapCleanup_ | fatal | error | warning | notice | info | debug | dryrun | header | success)
|
||||
continue
|
||||
;;
|
||||
*)
|
||||
_funcStackResponse+=("${FUNCNAME[${_i}]}:$(basename "${BASH_SOURCE[${_i}]}"):${BASH_LINENO[_i - 1]}")
|
||||
;;
|
||||
esac
|
||||
|
||||
done
|
||||
printf "( "
|
||||
printf %s "${_funcStackResponse[0]}"
|
||||
printf ' < %s' "${_funcStackResponse[@]:1}"
|
||||
printf ' )\n'
|
||||
}
|
||||
|
||||
_safeExit_() {
|
||||
# DESC:
|
||||
# Cleanup and exit from a script
|
||||
# ARGS:
|
||||
# $1 (optional) - Exit code (defaults to 0)
|
||||
# OUTS:
|
||||
# None
|
||||
|
||||
if [[ -d ${SCRIPT_LOCK-} ]]; then
|
||||
if command rm -rf "${SCRIPT_LOCK}"; then
|
||||
debug "Removing script lock"
|
||||
else
|
||||
warning "Script lock could not be removed. Try manually deleting ${yellow}'${SCRIPT_LOCK}'"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n ${TMP_DIR-} && -d ${TMP_DIR-} ]]; then
|
||||
if [[ ${1-} == 1 && -n "$(ls "${TMP_DIR}")" ]]; then
|
||||
command rm -r "${TMP_DIR}"
|
||||
else
|
||||
command rm -r "${TMP_DIR}"
|
||||
debug "Removing temp directory"
|
||||
fi
|
||||
fi
|
||||
|
||||
trap - INT TERM EXIT
|
||||
exit "${1:-0}"
|
||||
}
|
||||
|
||||
_trapCleanup_() {
|
||||
# DESC:
|
||||
# Log errors and cleanup from script when an error is trapped. Called by 'trap'
|
||||
# ARGS:
|
||||
# $1: Line number where error was trapped
|
||||
# $2: Line number in function
|
||||
# $3: Command executing at the time of the trap
|
||||
# $4: Names of all shell functions currently in the execution call stack
|
||||
# $5: Scriptname
|
||||
# $6: $BASH_SOURCE
|
||||
# USAGE:
|
||||
# trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM ERR
|
||||
# OUTS:
|
||||
# Exits script with error code 1
|
||||
|
||||
local _line=${1-} # LINENO
|
||||
local _linecallfunc=${2-}
|
||||
local _command="${3-}"
|
||||
local _funcstack="${4-}"
|
||||
local _script="${5-}"
|
||||
local _sourced="${6-}"
|
||||
|
||||
# Replace the cursor in-case 'tput civis' has been used
|
||||
tput cnorm
|
||||
|
||||
if declare -f "fatal" &>/dev/null && declare -f "_printFuncStack_" &>/dev/null; then
|
||||
|
||||
_funcstack="'$(printf "%s" "${_funcstack}" | sed -E 's/ / < /g')'"
|
||||
|
||||
if [[ ${_script##*/} == "${_sourced##*/}" ]]; then
|
||||
fatal "${7-} command: '${_command}' (line: ${_line}) [func: $(_printFuncStack_)]"
|
||||
else
|
||||
fatal "${7-} command: '${_command}' (func: ${_funcstack} called at line ${_linecallfunc} of '${_script##*/}') (line: ${_line} of '${_sourced##*/}') "
|
||||
fi
|
||||
else
|
||||
printf "%s\n" "Fatal error trapped. Exiting..."
|
||||
fi
|
||||
|
||||
if declare -f _safeExit_ &>/dev/null; then
|
||||
_safeExit_ 1
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
_makeTempDir_() {
    # DESC:
    #     Creates a temp directory to house temporary files
    # ARGS:
    #     $1 (Optional) - First characters/word of directory name
    # OUTS:
    #     Sets $TMP_DIR variable to the path of the temp directory
    # USAGE:
    #     _makeTempDir_ "$(basename "$0")"

    [ -d "${TMP_DIR-}" ] && return 0

    if [ -n "${1-}" ]; then
        TMP_DIR="${TMPDIR:-/tmp/}${1}.${RANDOM}.${RANDOM}.$$"
    else
        TMP_DIR="${TMPDIR:-/tmp/}$(basename "$0").${RANDOM}.${RANDOM}.${RANDOM}.$$"
    fi
    (umask 077 && mkdir "${TMP_DIR}") || {
        fatal "Could not create temporary directory! Exiting."
    }
    debug "\$TMP_DIR=${TMP_DIR}"
}
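# A quick illustration (values hypothetical): with `_makeTempDir_ "my-script.sh"` the
# subshell above runs `umask 077 && mkdir`, so $TMP_DIR is created mode 0700, e.g.:
#   TMP_DIR=/tmp/my-script.sh.12345.23456.67890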
# shellcheck disable=SC2120
_acquireScriptLock_() {
    # DESC:
    #     Acquire script lock to prevent running the same script a second time before the
    #     first instance exits
    # ARGS:
    #     $1 (optional) - Scope of script execution lock (system or user)
    # OUTS:
    #     exports $SCRIPT_LOCK - Path to the directory indicating we have the script lock
    #     Exits script if lock cannot be acquired
    # NOTE:
    #     If the lock was acquired it's automatically released in _safeExit_()

    local _lockDir
    if [[ ${1-} == 'system' ]]; then
        _lockDir="${TMPDIR:-/tmp/}$(basename "$0").lock"
    else
        _lockDir="${TMPDIR:-/tmp/}$(basename "$0").${UID}.lock"
    fi

    if command mkdir "${_lockDir}" 2>/dev/null; then
        readonly SCRIPT_LOCK="${_lockDir}"
        debug "Acquired script lock: ${yellow}${SCRIPT_LOCK}${purple}"
    else
        if declare -f "_safeExit_" &>/dev/null; then
            error "Unable to acquire script lock: ${yellow}${_lockDir}${red}"
            fatal "If you trust the script isn't running, delete the lock dir"
        else
            printf "%s\n" "ERROR: Could not acquire script lock. If you trust the script isn't running, delete: ${_lockDir}"
            exit 1
        fi
    fi
}
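# Illustrative calls (not part of the script): `_acquireScriptLock_` locks per user via
# ${UID} in the lock dir name, while `_acquireScriptLock_ "system"` uses a single lock
# dir shared by all users.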
_setPATH_() {
    # DESC:
    #     Add directories to $PATH so script can find executables
    # ARGS:
    #     $@ - One or more paths
    # OPTS:
    #     -x - Fail if directories are not found
    # OUTS:
    #     0: Success
    #     1: Failure
    #     Adds items to $PATH
    # USAGE:
    #     _setPATH_ "/usr/local/bin" "${HOME}/bin" "$(npm bin)"

    [[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"

    local opt
    local OPTIND=1
    local _failIfNotFound=false

    while getopts ":xX" opt; do
        case ${opt} in
            x | X) _failIfNotFound=true ;;
            *)
                error "Unrecognized option '${1}' passed to ${FUNCNAME[0]}" "${LINENO}"
                return 1
                ;;
        esac
    done
    shift $((OPTIND - 1))

    local _newPath

    for _newPath in "$@"; do
        if [ -d "${_newPath}" ]; then
            if ! printf "%s" "${PATH}" | grep -Eq "(^|:)${_newPath}($|:)"; then
                PATH="${_newPath}:${PATH}"
                debug "Added '${_newPath}' to PATH"
            else
                debug "_setPATH_: '${_newPath}' already exists in PATH"
            fi
        else
            debug "_setPATH_: cannot find: ${_newPath}"
            if [[ ${_failIfNotFound} == true ]]; then
                return 1
            fi
            continue
        fi
    done
    return 0
}
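# Illustrative call (hypothetical path): fail hard when a required directory is missing:
#   _setPATH_ -x "/opt/mytool/bin" || fatal "Could not add /opt/mytool/bin to PATH"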
_useGNUutils_() {
    # DESC:
    #     Add GNU utilities to PATH to allow consistent use of sed/grep/tar/etc. on macOS
    # ARGS:
    #     None
    # OUTS:
    #     0 if successful
    #     1 if unsuccessful
    #     PATH: Adds GNU utilities to the path
    # USAGE:
    #     # if ! _useGNUutils_; then exit 1; fi
    # NOTES:
    #     GNU utilities can be added to macOS using Homebrew

    ! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"

    if _setPATH_ \
        "/usr/local/opt/gnu-tar/libexec/gnubin" \
        "/usr/local/opt/coreutils/libexec/gnubin" \
        "/usr/local/opt/gnu-sed/libexec/gnubin" \
        "/usr/local/opt/grep/libexec/gnubin" \
        "/usr/local/opt/findutils/libexec/gnubin" \
        "/opt/homebrew/opt/findutils/libexec/gnubin" \
        "/opt/homebrew/opt/gnu-sed/libexec/gnubin" \
        "/opt/homebrew/opt/grep/libexec/gnubin" \
        "/opt/homebrew/opt/coreutils/libexec/gnubin" \
        "/opt/homebrew/opt/gnu-tar/libexec/gnubin"; then
        return 0
    else
        return 1
    fi
}
_homebrewPath_() {
    # DESC:
    #     Add Homebrew bin dir to PATH
    # ARGS:
    #     None
    # OUTS:
    #     0 if successful
    #     1 if unsuccessful
    #     PATH: Adds Homebrew bin directory to PATH
    # USAGE:
    #     # if ! _homebrewPath_; then exit 1; fi

    ! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"

    if _uname=$(command -v uname); then
        if "${_uname}" | tr '[:upper:]' '[:lower:]' | grep -q 'darwin'; then
            if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
                return 0
            else
                return 1
            fi
        fi
    else
        if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
            return 0
        else
            return 1
        fi
    fi
}
_parseOptions_() {
    # DESC:
    #     Iterates through options passed to script and sets variables. Will break -ab into -a -b
    #     when needed and --foo=bar into --foo bar
    # ARGS:
    #     $@ from command line
    # OUTS:
    #     Sets array 'ARGS' containing all arguments passed to script that were not parsed as options
    # USAGE:
    #     _parseOptions_ "$@"

    # Iterate over options
    local _optstring=h
    declare -a _options
    local _c
    local i
    while (($#)); do
        case $1 in
            # If option is of type -ab
            -[!-]?*)
                # Loop over each character starting with the second
                for ((i = 1; i < ${#1}; i++)); do
                    _c=${1:i:1}
                    _options+=("-${_c}") # Add current char to options
                    # If option takes a required argument, and it's not the last char make
                    # the rest of the string its argument
                    if [[ ${_optstring} == *"${_c}:"* && -n ${1:i+1} ]]; then
                        _options+=("${1:i+1}")
                        break
                    fi
                done
                ;;
            # If option is of type --foo=bar
            --?*=*) _options+=("${1%%=*}" "${1#*=}") ;;
            # add --endopts for --
            --) _options+=(--endopts) ;;
            # Otherwise, nothing special
            *) _options+=("$1") ;;
        esac
        shift
    done
    set -- "${_options[@]-}"
    unset _options

    # Read the options and set stuff
    # shellcheck disable=SC2034
    while [[ ${1-} == -?* ]]; do
        case $1 in
            # Custom options

            # Common options
            -h | --help)
                _usage_
                _safeExit_
                ;;
            --loglevel)
                shift
                LOGLEVEL=${1}
                ;;
            --logfile)
                shift
                LOGFILE="${1}"
                ;;
            -n | --dryrun) DRYRUN=true ;;
            -v | --verbose) VERBOSE=true ;;
            -q | --quiet) QUIET=true ;;
            --force) FORCE=true ;;
            --endopts)
                shift
                break
                ;;
            *)
                if declare -f _safeExit_ &>/dev/null; then
                    fatal "invalid option: $1"
                else
                    printf "%s\n" "ERROR: Invalid option: $1"
                    exit 1
                fi
                ;;
        esac
        shift
    done

    if [[ -z ${*} || ${*} == null ]]; then
        ARGS=()
    else
        ARGS+=("$@") # Store the remaining user input as arguments.
    fi
}
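# Illustration of the normalization above (hypothetical flags, shown as a comment only):
#   _parseOptions_ -nv --logfile=/tmp/out.log
#   # is rewritten internally to: -n -v --logfile /tmp/out.log before the second loop runs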
_columns_() {
    # DESC:
    #     Prints a two column output from a key/value pair.
    #     Optionally pass a number of 2 space tabs to indent the output.
    # ARGS:
    #     $1 (required): Key name (Left column text)
    #     $2 (required): Long value (Right column text. Wraps around if too long)
    #     $3 (optional): Number of 2 character tabs to indent the command (default 1)
    # OPTS:
    #     -b    Bold the left column
    #     -u    Underline the left column
    #     -r    Reverse background and foreground colors
    # OUTS:
    #     stdout: Prints the output in columns
    # NOTE:
    #     Long text or ANSI colors in the first column may create display issues
    # USAGE:
    #     _columns_ "Key" "Long value text" [tab level]

    [[ $# -lt 2 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"

    local opt
    local OPTIND=1
    local _style=""
    while getopts ":bBuUrR" opt; do
        case ${opt} in
            b | B) _style="${_style}${bold}" ;;
            u | U) _style="${_style}${underline}" ;;
            r | R) _style="${_style}${reverse}" ;;
            *) fatal "Unrecognized option '${1}' passed to ${FUNCNAME[0]}. Exiting." ;;
        esac
    done
    shift $((OPTIND - 1))

    local _key="${1}"
    local _value="${2}"
    local _tabLevel="${3-}"
    local _tabSize=2
    local _line
    local _rightIndent
    local _leftIndent
    if [[ -z ${3-} ]]; then
        _tabLevel=0
    fi

    _leftIndent="$((_tabLevel * _tabSize))"

    local _leftColumnWidth="$((30 + _leftIndent))"

    if [ "$(tput cols)" -gt 180 ]; then
        _rightIndent=110
    elif [ "$(tput cols)" -gt 160 ]; then
        _rightIndent=90
    elif [ "$(tput cols)" -gt 130 ]; then
        _rightIndent=60
    elif [ "$(tput cols)" -gt 120 ]; then
        _rightIndent=50
    elif [ "$(tput cols)" -gt 110 ]; then
        _rightIndent=40
    elif [ "$(tput cols)" -gt 100 ]; then
        _rightIndent=30
    elif [ "$(tput cols)" -gt 90 ]; then
        _rightIndent=20
    elif [ "$(tput cols)" -gt 80 ]; then
        _rightIndent=10
    else
        _rightIndent=0
    fi

    local _rightWrapLength=$(($(tput cols) - _leftColumnWidth - _leftIndent - _rightIndent))

    local _first_line=0
    while read -r _line; do
        if [[ ${_first_line} -eq 0 ]]; then
            _first_line=1
        else
            _key=" "
        fi
        printf "%-${_leftIndent}s${_style}%-${_leftColumnWidth}b${reset} %b\n" "" "${_key}${reset}" "${_line}"
    done <<<"$(fold -w${_rightWrapLength} -s <<<"${_value}")"
}
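# Illustrative call (text hypothetical): a bolded key indented one tab level:
#   _columns_ -b "--force" "Skip all user interaction. Implied 'Yes' to all actions." 1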
_usage_() {
    cat <<USAGE_TEXT

  ${bold}$(basename "$0") [OPTION]... [FILE]...${reset}

  Custom pre-commit hook script. This script is intended to be used as part of the pre-commit pipeline managed within .pre-commit-config.yaml.

  ${bold}${underline}Options:${reset}
$(_columns_ -b -- '-h, --help' "Display this help and exit" 2)
$(_columns_ -b -- "--loglevel [LEVEL]" "One of: FATAL, ERROR (default), WARN, INFO, NOTICE, DEBUG, ALL, OFF" 2)
$(_columns_ -b -- "--logfile [FILE]" "Full PATH to logfile. (Default is '\${HOME}/logs/$(basename "$0").log')" 2)
$(_columns_ -b -- "-n, --dryrun" "Non-destructive. Makes no permanent changes." 2)
$(_columns_ -b -- "-q, --quiet" "Quiet (no output)" 2)
$(_columns_ -b -- "-v, --verbose" "Output more information. (Items echoed to 'verbose')" 2)
$(_columns_ -b -- "--force" "Skip all user interaction. Implied 'Yes' to all actions." 2)

  ${bold}${underline}Example Usage:${reset}

    ${gray}# Run the script and specify log level and log file.${reset}
    $(basename "$0") -vn --logfile "/path/to/file.log" --loglevel 'WARN'
USAGE_TEXT
}
# ################################## INITIALIZE AND RUN THE SCRIPT
# (Comment or uncomment the lines below to customize script behavior)

trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM

# Trap errors in subshells and functions
set -o errtrace

# Exit on error. Append '||true' if you expect an error
set -o errexit

# Use last non-zero exit code in a pipeline
set -o pipefail

# Confirm we have BASH v4 or greater
[ "${BASH_VERSINFO:-0}" -ge 4 ] || {
    printf "%s\n" "ERROR: BASH_VERSINFO is '${BASH_VERSINFO:-0}'. This script requires BASH v4 or greater."
    exit 1
}

# Make `for f in *.txt` work when `*.txt` matches zero files
shopt -s nullglob globstar

# Set IFS to preferred implementation
IFS=$' \n\t'

# Run in debug mode
# set -o xtrace

# Initialize color constants
_setColors_

# Disallow expansion of unset variables
set -o nounset

# Force arguments when invoking the script
# [[ $# -eq 0 ]] && _parseOptions_ "-h"

# Parse arguments passed to script
_parseOptions_ "$@"

# Create a temp directory '$TMP_DIR'
_makeTempDir_ "$(basename "$0")"

# Acquire script lock
# _acquireScriptLock_

# Add Homebrew bin directory to PATH (macOS)
# _homebrewPath_

# Source GNU utilities from Homebrew (macOS)
# _useGNUutils_

# Run the main logic script
_mainScript_

# Exit cleanly
_safeExit_
scripts/update_dependencies.py (new executable file, 150 lines)
@@ -0,0 +1,150 @@
#!/usr/bin/env python
"""Script to update the pyproject.toml file with the latest versions of the dependencies."""
from pathlib import Path
from textwrap import wrap

try:
    import tomllib
except ModuleNotFoundError:  # pragma: no cover
    import tomli as tomllib  # type: ignore [no-redef]

import sh
from rich.console import Console

console = Console()


def dryrun(msg: str) -> None:
    """Print a message if the dry run flag is set.

    Args:
        msg: Message to print
    """
    console.print(f"[cyan]DRYRUN | {msg}[/cyan]")


def success(msg: str) -> None:
    """Print a success message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[green]SUCCESS | {msg}[/green]")


def warning(msg: str) -> None:
    """Print a warning message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[yellow]WARNING | {msg}[/yellow]")


def error(msg: str) -> None:
    """Print an error message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[red]ERROR | {msg}[/red]")


def notice(msg: str) -> None:
    """Print a notice message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[bold]NOTICE | {msg}[/bold]")


def info(msg: str) -> None:
    """Print an info message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"INFO | {msg}")


def usage(msg: str, width: int = 80) -> None:
    """Print a usage message without using logging.

    Args:
        msg: Message to print
        width (optional): Width of the message
    """
    for _n, line in enumerate(wrap(msg, width=width)):
        if _n == 0:
            console.print(f"[dim]USAGE | {line}")
        else:
            console.print(f"[dim]      | {line}")


def debug(msg: str) -> None:
    """Print a debug message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[blue]DEBUG | {msg}[/blue]")


def dim(msg: str) -> None:
    """Print a message in dimmed color.

    Args:
        msg: Message to print
    """
    console.print(f"[dim]{msg}[/dim]")


# Load the pyproject.toml file
pyproject = Path(__file__).parents[1] / "pyproject.toml"

if not pyproject.exists():
    console.print("pyproject.toml file not found")
    raise SystemExit(1)

with pyproject.open("rb") as f:
    try:
        data = tomllib.load(f)
    except tomllib.TOMLDecodeError as e:
        raise SystemExit(1) from e


# Get the latest versions of all dependencies
info("Getting latest versions of dependencies...")
packages: dict = {}
for line in sh.poetry("--no-ansi", "show", "--outdated").splitlines():
    package, current, latest = line.split()[:3]
    packages[package] = {"current_version": current, "new_version": latest}
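# Each line of `poetry show --outdated` looks roughly like this (illustrative):
#   rich  13.3.1  13.3.2  Render rich text and more to the terminal
# so the first three whitespace-separated fields give the name, current, and latest versions.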
if not packages:
    success("All dependencies are up to date")
    raise SystemExit(0)


dependencies = data["tool"]["poetry"]["dependencies"]
groups = data["tool"]["poetry"]["group"]

for p in dependencies:
    if p in packages:
        notice(
            f"Updating {p} from {packages[p]['current_version']} to {packages[p]['new_version']}"
        )
        sh.poetry("add", f"{p}@latest", _fg=True)


for group in groups:
    for p in groups[group]["dependencies"]:
        if p in packages:
            notice(
                f"Updating {p} from {packages[p]['current_version']} to {packages[p]['new_version']}"
            )
            sh.poetry("add", f"{p}@latest", "--group", group, _fg=True)

sh.poetry("update", _fg=True)
success("All dependencies are up to date")
raise SystemExit(0)
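For readers unfamiliar with the `sh` package used above, here is a minimal sketch of the equivalent stdlib call. This illustrates what `sh.poetry(...)` does; it is not part of the commit:

import subprocess

# Equivalent of sh.poetry("--no-ansi", "show", "--outdated") without the sh package
result = subprocess.run(
    ["poetry", "--no-ansi", "show", "--outdated"],
    capture_output=True,
    text=True,
    check=True,
)
outdated_lines = result.stdout.splitlines()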
@@ -1,2 +1,2 @@
"""obsidian-metadata version."""
__version__ = "0.2.0"
__version__ = "0.12.1"
@@ -17,6 +17,21 @@ from obsidian_metadata._utils.alerts import logger as log
class ConfigQuestions:
    """Questions to ask the user when creating a configuration file."""

    @staticmethod
    def _validate_valid_dir(path: str) -> bool | str:
        """Validate a valid directory.

        Returns:
            bool | str: True if the path is valid, otherwise a string with the error message.
        """
        path_to_validate: Path = Path(path).expanduser().resolve()
        if not path_to_validate.exists():
            return f"Path does not exist: {path_to_validate}"
        if not path_to_validate.is_dir():
            return f"Path is not a directory: {path_to_validate}"

        return True

    @staticmethod
    def ask_for_vault_path() -> Path:  # pragma: no cover
        """Ask the user for the path to their vault.

@@ -34,28 +49,12 @@ class ConfigQuestions:

        return Path(vault_path).expanduser().resolve()

    @staticmethod
    def _validate_valid_dir(path: str) -> bool | str:
        """Validates a valid directory.

        Returns:
            bool | str: True if the path is valid, otherwise a string with the error message.
        """
        path_to_validate: Path = Path(path).expanduser().resolve()
        if not path_to_validate.exists():
            return f"Path does not exist: {path_to_validate}"
        if not path_to_validate.is_dir():
            return f"Path is not a directory: {path_to_validate}"

        return True


@rich.repr.auto
class Config:
    """Representation of a configuration file."""

    def __init__(self, config_path: Path = None, vault_path: Path = None) -> None:
    def __init__(self, config_path: Path | None = None, vault_path: Path | None = None) -> None:
        if vault_path is None:
            self.config_path: Path = self._validate_config_path(Path(config_path))
            self.config: dict[str, Any] = self._load_config()
@@ -66,7 +65,11 @@ class Config:
        else:
            self.config_path = None
            self.config = {
                "command_line_vault": {"path": vault_path, "exclude_paths": [".git", ".obsidian"]}
                "command_line_vault": {
                    "path": vault_path,
                    "exclude_paths": [".git", ".obsidian"],
                    "insert_location": "BOTTOM",
                }
            }

        try:
@@ -74,10 +77,11 @@ class Config:
                VaultConfig(vault_name=key, vault_config=self.config[key]) for key in self.config
            ]
        except TypeError as e:
            log.error(f"Configuration file is invalid: '{self.config_path}'")
            log.error(f"Configuration file is invalid: '{self.config_path}'\n{e}")
            raise typer.Exit(code=1) from e

        log.debug(f"Loaded configuration from '{self.config_path}'")
        log.trace("Configuration:")
        log.trace(self.config)

    def __rich_repr__(self) -> rich.repr.Result:  # pragma: no cover
@@ -85,6 +89,15 @@ class Config:
        yield "config_path", self.config_path
        yield "vaults", self.vaults

    def _load_config(self) -> dict[str, Any]:
        """Load the configuration file."""
        try:
            with self.config_path.open(mode="rt", encoding="utf-8") as fp:
                return tomlkit.load(fp)
        except tomlkit.exceptions.TOMLKitError as e:
            alerts.error(f"Could not parse '{self.config_path}'\n{e}")
            raise typer.Exit(code=1) from e

    def _validate_config_path(self, config_path: Path | None) -> Path:
        """Load the configuration path."""
        if config_path is None:
@@ -96,15 +109,6 @@ class Config:

        return config_path.expanduser().resolve()

    def _load_config(self) -> dict[str, Any]:
        """Load the configuration file."""
        try:
            with open(self.config_path, encoding="utf-8") as fp:
                return tomlkit.load(fp)
        except tomlkit.exceptions.TOMLKitError as e:
            alerts.error(f"Could not parse '{self.config_path}'")
            raise typer.Exit(code=1) from e

    def _write_default_config(self, path_to_config: Path) -> None:
        """Write the default configuration file when no config file is found."""
        vault_path = ConfigQuestions.ask_for_vault_path()
@@ -114,10 +118,19 @@ class Config:
    ["Vault 1"] # Name of the vault.

    # Path to your obsidian vault
    # Note for Windows users: Windows paths must use `\\` as the path separator due to a limitation with how TOML parses strings.
    # Example: "C:\\Users\\username\\Documents\\Obsidian"
    path = "{vault_path}"

    # Folders within the vault to ignore when indexing metadata
    exclude_paths = [".git", ".obsidian"]"""
    exclude_paths = [".git", ".obsidian"]

    # Location to add new metadata. One of:
    #    TOP:            Directly after frontmatter.
    #    AFTER_TITLE:    After the first header following frontmatter.
    #    BOTTOM:         The bottom of the note
    insert_location = "BOTTOM"
    """

        path_to_config.write_text(dedent(config_text))

@@ -141,7 +154,12 @@ class VaultConfig:
        try:
            self.exclude_paths = self.config["exclude_paths"]
        except KeyError:
            self.exclude_paths = []
            self.exclude_paths = [".git", ".obsidian"]

        try:
            self.insert_location = self.config["insert_location"]
        except KeyError:
            self.insert_location = "BOTTOM"

    def __rich_repr__(self) -> rich.repr.Result:  # pragma: no cover
        """Define rich representation of a vault config."""
@@ -149,6 +167,7 @@ class VaultConfig:
        yield "config", self.config
        yield "path", self.path
        yield "exclude_paths", self.exclude_paths
        yield "insert_location", self.insert_location

    def _validate_vault_path(self, vault_path: Path | None) -> Path:
        """Validate the vault path."""
@@ -5,10 +5,13 @@ from obsidian_metadata._utils.alerts import LoggerManager
from obsidian_metadata._utils.utilities import (
    clean_dictionary,
    clear_screen,
    delete_from_dict,
    dict_contains,
    dict_values_to_lists_strings,
    dict_keys_to_lower,
    docstring_parameter,
    remove_markdown_sections,
    merge_dictionaries,
    rename_in_dict,
    validate_csv_bulk_imports,
    version_callback,
)

@@ -16,11 +19,13 @@ __all__ = [
    "alerts",
    "clean_dictionary",
    "clear_screen",
    "dict_values_to_lists_strings",
    "delete_from_dict",
    "dict_contains",
    "dict_keys_to_lower",
    "docstring_parameter",
    "LoggerManager",
    "remove_markdown_sections",
    "vault_validation",
    "merge_dictionaries",
    "rename_in_dict",
    "validate_csv_bulk_imports",
    "version_callback",
]
@@ -1,11 +1,36 @@
"""Logging and alerts."""
import sys
from enum import Enum
from pathlib import Path
from textwrap import wrap

import rich.repr
import typer
from loguru import logger
from rich import print

from obsidian_metadata._utils.console import console


class LogLevel(Enum):
    """Enum for log levels."""

    TRACE = 5
    DEBUG = 10
    INFO = 20
    SUCCESS = 25
    WARNING = 30
    ERROR = 40
    CRITICAL = 50
    EXCEPTION = 60


class VerboseLevel(Enum):
    """Enum for verbose levels."""

    WARN = 0
    INFO = 1
    DEBUG = 2
    TRACE = 3
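# Illustrative mapping (not part of the diff): CLI verbosity flags select loguru levels,
# e.g. -vv -> VerboseLevel.DEBUG.value == 2, which sets log_level to LogLevel.DEBUG.value == 10.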
def dryrun(msg: str) -> None:
@@ -14,7 +39,7 @@ def dryrun(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"[cyan]DRYRUN | {msg}[/cyan]")
    console.print(f"[cyan]DRYRUN | {msg}[/cyan]")


def success(msg: str) -> None:
@@ -23,7 +48,7 @@ def success(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"[green]SUCCESS | {msg}[/green]")
    console.print(f"[green]SUCCESS | {msg}[/green]")


def warning(msg: str) -> None:
@@ -32,7 +57,7 @@ def warning(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"[yellow]WARNING | {msg}[/yellow]")
    console.print(f"[yellow]WARNING | {msg}[/yellow]")


def error(msg: str) -> None:
@@ -41,7 +66,7 @@ def error(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"[red]ERROR | {msg}[/red]")
    console.print(f"[red]ERROR | {msg}[/red]")


def notice(msg: str) -> None:
@@ -50,7 +75,7 @@ def notice(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"[bold]NOTICE | {msg}[/bold]")
    console.print(f"[bold]NOTICE | {msg}[/bold]")


def info(msg: str) -> None:
@@ -59,7 +84,33 @@ def info(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"INFO | {msg}")
    console.print(f"INFO | {msg}")


def usage(msg: str, width: int | None = None) -> None:
    """Print a usage message without using logging.

    Args:
        msg: Message to print
        width (optional): Width of the message
    """
    if width is None:
        width = console.width - 15

    for _n, line in enumerate(wrap(msg, width=width)):
        if _n == 0:
            console.print(f"[dim]USAGE | {line}")
        else:
            console.print(f"[dim]      | {line}")


def debug(msg: str) -> None:
    """Print a debug message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[blue]DEBUG | {msg}[/blue]")


def dim(msg: str) -> None:
@@ -68,19 +119,18 @@ def dim(msg: str) -> None:
    Args:
        msg: Message to print
    """
    print(f"[dim]{msg}[/dim]")
    console.print(f"[dim]{msg}[/dim]")


def _log_formatter(record: dict) -> str:
    """Create custom log formatter based on the log level. This affects the logs sent to stdout/stderr but not the log file."""
    if (
        record["level"].name == "INFO"
        or record["level"].name == "SUCCESS"
        or record["level"].name == "WARNING"
    ):
        return "<level>{level: <8}</level> | <level>{message}</level>\n{exception}"
    if record["level"].name in ("INFO", "SUCCESS", "WARNING"):
        return "<level><normal>{level: <8} | {message}</normal></level>\n{exception}"

    return "<level>{level: <8}</level> | <level>{message}</level> <fg #c5c5c5>({name}:{function}:{line})</fg #c5c5c5>\n{exception}"
    if record["level"].name in ("TRACE", "DEBUG"):
        return "<level><normal>{level: <8} | {message}</normal></level> <fg #c5c5c5>({name}:{function}:{line})</fg #c5c5c5>\n{exception}"

    return "<level>{level: <8} | {message}</level> <fg #c5c5c5>({name}:{function}:{line})</fg #c5c5c5>\n{exception}"


@rich.repr.auto
@@ -124,10 +174,9 @@ class LoggerManager:
        self.log_level = log_level

        if self.log_file == Path("/logs") and self.log_to_file:  # pragma: no cover
            print("No log file specified")
            raise typer.Exit(1)
            raise typer.BadParameter("No log file specified")

        if self.verbosity >= 3:
        if self.verbosity >= VerboseLevel.TRACE.value:
            logger.remove()
            logger.add(
                sys.stderr,
@@ -137,7 +186,7 @@ class LoggerManager:
                diagnose=True,
            )
            self.log_level = 5
        elif self.verbosity == 2:
        elif self.verbosity == VerboseLevel.DEBUG.value:
            logger.remove()
            logger.add(
                sys.stderr,
@@ -147,7 +196,7 @@ class LoggerManager:
                diagnose=True,
            )
            self.log_level = 10
        elif self.verbosity == 1:
        elif self.verbosity == VerboseLevel.INFO.value:
            logger.remove()
            logger.add(
                sys.stderr,
@@ -190,9 +239,9 @@ class LoggerManager:
        Returns:
            bool: True if the current log level is TRACE or lower, False otherwise.
        """
        if self.log_level <= 5:
        if self.log_level <= LogLevel.TRACE.value:
            if msg:
                print(msg)
                console.print(msg)
            return True
        return False

@@ -205,9 +254,9 @@ class LoggerManager:
        Returns:
            bool: True if the current log level is DEBUG or lower, False otherwise.
        """
        if self.log_level <= 10:
        if self.log_level <= LogLevel.DEBUG.value:
            if msg:
                print(msg)
                console.print(msg)
            return True
        return False

@@ -220,9 +269,9 @@ class LoggerManager:
        Returns:
            bool: True if the current log level is INFO or lower, False otherwise.
        """
        if self.log_level <= 20:
        if self.log_level <= LogLevel.INFO.value:
            if msg:
                print(msg)
                console.print(msg)
            return True
        return False

@@ -235,8 +284,8 @@ class LoggerManager:
        Returns:
            bool: True if the current log level is default or lower, False otherwise.
        """
        if self.log_level <= 30:
        if self.log_level <= LogLevel.WARNING.value:
            if msg:
                print(msg)
                console.print(msg)
            return True
        return False  # pragma: no cover
src/obsidian_metadata/_utils/console.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""Rich console object for the application."""
from rich.console import Console

console = Console()
console_no_markup = Console(markup=False)
@@ -1,89 +1,127 @@
"""Utility functions."""
import copy
import csv
import re
from os import name, system
from pathlib import Path
from typing import Any

import typer

from obsidian_metadata.__version__ import __version__
from obsidian_metadata._utils.console import console


def dict_values_to_lists_strings(dictionary: dict, strip_null_values: bool = False) -> dict:
    """Converts all values in a dictionary to lists of strings.

    Args:
        dictionary (dict): Dictionary to convert
        strip_null (bool): Whether to strip null values

    Returns:
        dict: Dictionary with all values converted to lists of strings

        {key: sorted(new_dict[key]) for key in sorted(new_dict)}
    """
    new_dict = {}

    if strip_null_values:
        for key, value in dictionary.items():
            if isinstance(value, list):
                new_dict[key] = sorted([str(item) for item in value if item is not None])
            elif isinstance(value, dict):
                new_dict[key] = dict_values_to_lists_strings(value)  # type: ignore[assignment]
            elif value is None or value == "None" or value == "":
                new_dict[key] = []
            else:
                new_dict[key] = [str(value)]

        return new_dict

    for key, value in dictionary.items():
        if isinstance(value, list):
            new_dict[key] = sorted([str(item) for item in value])
        elif isinstance(value, dict):
            new_dict[key] = dict_values_to_lists_strings(value)  # type: ignore[assignment]
        else:
            new_dict[key] = [str(value)]

    return new_dict


def clean_dictionary(dictionary: dict[str, Any]) -> dict[str, Any]:
    """Clean up a dictionary by stripping markdown formatting from keys and values.

    Args:
        dictionary (dict): Dictionary to clean

    Returns:
        dict: Cleaned dictionary
    """
    new_dict = copy.deepcopy(dictionary)
    new_dict = {key.strip("*[]# "): value for key, value in new_dict.items()}
    for key, value in new_dict.items():
        if isinstance(value, list):
            new_dict[key] = [s.strip("*[]# ") for s in value if isinstance(value, list)]
        elif isinstance(value, str):
            new_dict[key] = value.strip("*[]# ")

    return new_dict


def remove_markdown_sections(
    text: str,
    strip_codeblocks: bool = False,
    strip_inlinecode: bool = False,
    strip_frontmatter: bool = False,
) -> str:
    """Strips markdown sections from text.

    Args:
        text (str): Text to remove code blocks from
        strip_codeblocks (bool, optional): Strip code blocks. Defaults to False.
        strip_inlinecode (bool, optional): Strip inline code. Defaults to False.
        strip_frontmatter (bool, optional): Strip frontmatter. Defaults to False.

    Returns:
        str: Text without code blocks
    """
    if strip_codeblocks:
        text = re.sub(r"`{3}.*?`{3}", "", text, flags=re.DOTALL)

    if strip_inlinecode:
        text = re.sub(r"`.*?`", "", text)

    if strip_frontmatter:
        text = re.sub(r"^\s*---.*?---", "", text, flags=re.DOTALL)

    return text  # noqa: RET504


def clear_screen() -> None:  # pragma: no cover
    """Clear the screen."""
    _ = system("cls") if name == "nt" else system("clear")  # noqa: S605, S607


def dict_contains(
    dictionary: dict[str, list[str]], key: str, value: str | None = None, is_regex: bool = False
) -> bool:
    """Check if a dictionary contains a key or if a key contains a value.

    Args:
        dictionary (dict): Dictionary to check
        key (str): Key to check for
        value (str, optional): Value to check for. Defaults to None.
        is_regex (bool, optional): Whether the key is a regex. Defaults to False.

    Returns:
        bool: Whether the dictionary contains the key or value
    """
    if value is None:
        if is_regex:
            return any(re.search(key, str(_key)) for _key in dictionary)
        return key in dictionary

    if is_regex:
        for _key in dictionary:
            if re.search(key, str(_key)) and any(re.search(value, _v) for _v in dictionary[_key]):
                return True

        return False

    return key in dictionary and value in dictionary[key]
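# Illustrative behavior of the new dict_contains (examples not in the diff):
#   dict_contains({"tags": ["a", "b"]}, "tags")                       # True
#   dict_contains({"tags": ["a", "b"]}, "tags", "c")                  # False
#   dict_contains({"tags": ["a", "b"]}, r"t.gs", "a", is_regex=True)  # True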
def version_callback(value: bool) -> None:
    """Print version and exit."""
    if value:
        print(f"{__package__.split('.')[0]}: v{__version__}")
        raise typer.Exit()


def dict_keys_to_lower(dictionary: dict) -> dict:
    """Convert all keys in a dictionary to lowercase.

    Args:
        dictionary (dict): Dictionary to convert

    Returns:
        dict: Dictionary with all keys converted to lowercase
    """
    return {key.lower(): value for key, value in dictionary.items()}


def delete_from_dict(  # noqa: C901
    dictionary: dict, key: str, value: str | None = None, is_regex: bool = False
) -> dict:
    """Delete a key or a value from a dictionary.

    Args:
        dictionary (dict): Dictionary to delete from
        is_regex (bool, optional): Whether the key is a regex. Defaults to False.
        key (str): Key to delete
        value (str, optional): Value to delete. Defaults to None.

    Returns:
        dict: Dictionary without the key
    """
    dictionary = copy.deepcopy(dictionary)

    if value is None:
        if is_regex:
            return {k: v for k, v in dictionary.items() if not re.search(key, str(k))}

        return {k: v for k, v in dictionary.items() if k != key}

    if is_regex:
        keys_to_delete = []
        for _key in dictionary:
            if re.search(key, str(_key)):
                if isinstance(dictionary[_key], list):
                    dictionary[_key] = [v for v in dictionary[_key] if not re.search(value, v)]
                elif isinstance(dictionary[_key], str) and re.search(value, dictionary[_key]):
                    keys_to_delete.append(_key)

        for key in keys_to_delete:
            dictionary.pop(key)

    elif key in dictionary and isinstance(dictionary[key], list):
        dictionary[key] = [v for v in dictionary[key] if v != value]
    elif key in dictionary and dictionary[key] == value:
        dictionary.pop(key)

    return dictionary
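# Illustrative behavior of delete_from_dict (examples not in the diff):
#   delete_from_dict({"tags": ["a", "b"]}, key="tags", value="a")
#   # -> {"tags": ["b"]}
#   delete_from_dict({"tag1": ["a"], "area": ["x"]}, key=r"tag\d", is_regex=True)
#   # -> {"area": ["x"]}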
def docstring_parameter(*sub: Any) -> Any:
    """Decorator to replace variables within docstrings.
    """Replace variables within docstrings.

    Args:
        sub (Any): Replacement variables
@@ -103,55 +141,116 @@ def docstring_parameter(*sub: Any) -> Any:
    return dec
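# Illustrative use of docstring_parameter (hypothetical function):
#   @docstring_parameter("obsidian-metadata")
#   def helper() -> None:
#       """Help text for {0}."""  # renders as "Help text for obsidian-metadata."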
def clean_dictionary(dictionary: dict[str, Any]) -> dict[str, Any]:
    """Clean up a dictionary by stripping markdown formatting from keys and values.

    Args:
        dictionary (dict): Dictionary to clean

    Returns:
        dict: Cleaned dictionary
    """
    new_dict = {key.strip(): value for key, value in dictionary.items()}
    new_dict = {key.strip("*[]#"): value for key, value in new_dict.items()}
    for key, value in new_dict.items():
        new_dict[key] = [s.strip("*[]#") for s in value if isinstance(value, list)]

    return new_dict


def merge_dictionaries(dict1: dict, dict2: dict) -> dict:
    """Merge two dictionaries. When the values are lists, they are merged and sorted.

    Args:
        dict1 (dict): First dictionary.
        dict2 (dict): Second dictionary.

    Returns:
        dict: Merged dictionary.
    """
    d1 = copy.deepcopy(dict1)
    d2 = copy.deepcopy(dict2)

    for _key in d1:
        if not isinstance(d1[_key], list):
            raise TypeError(f"Key {_key} is not a list.")
    for _key in d2:
        if not isinstance(d2[_key], list):
            raise TypeError(f"Key {_key} is not a list.")

    for k, v in d2.items():
        if k in d1:
            d1[k].extend(v)
            d1[k] = sorted(set(d1[k]))
        else:
            d1[k] = sorted(set(v))

    return dict(sorted(d1.items()))
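# Illustrative behavior of merge_dictionaries (example not in the diff):
#   merge_dictionaries({"tags": ["b", "a"]}, {"tags": ["b", "c"], "area": ["x"]})
#   # -> {"area": ["x"], "tags": ["a", "b", "c"]}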
def clear_screen() -> None:  # pragma: no cover
    """Clears the screen."""
    # for windows
    _ = system("cls") if name == "nt" else system("clear")


def dict_contains(
    dictionary: dict[str, list[str]], key: str, value: str = None, is_regex: bool = False
) -> bool:
    """Checks if a dictionary contains a key.

    Args:
        dictionary (dict): Dictionary to check
        key (str): Key to check for
        value (str, optional): Value to check for. Defaults to None.
        is_regex (bool, optional): Whether the key is a regex. Defaults to False.

    Returns:
        bool: Whether the dictionary contains the key
    """
    if value is None:
        if is_regex:
            return any(re.search(key, str(_key)) for _key in dictionary)
        return key in dictionary

    if is_regex:
        found_keys = []
        for _key in dictionary:
            if re.search(key, str(_key)):
                found_keys.append(
                    any(re.search(value, _v) for _v in dictionary[_key]),
                )
        return any(found_keys)

    return key in dictionary and value in dictionary[key]


def rename_in_dict(
    dictionary: dict[str, list[str]], key: str, value_1: str, value_2: str | None = None
) -> dict:
    """Rename a key or a value in a dictionary whose values are lists of strings.

    Args:
        dictionary (dict): Dictionary to rename in.
        key (str): Key to check.
        value_1 (str): With `value_2` this is the value to rename. If `value_2` is None this is the new key name.
        value_2 (str, optional): New value.

    Returns:
        dict: Dictionary with renamed key or value
    """
    dictionary = copy.deepcopy(dictionary)

    if value_2 is None:
        if key in dictionary and value_1 not in dictionary:
            dictionary[value_1] = dictionary.pop(key)
    elif key in dictionary and value_1 in dictionary[key]:
        dictionary[key] = sorted({value_2 if x == value_1 else x for x in dictionary[key]})

    return dictionary
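# Illustrative behavior of rename_in_dict (examples not in the diff):
#   rename_in_dict({"tag": ["a", "b"]}, "tag", "new_tag")
#   # -> {"new_tag": ["a", "b"]}  (key renamed when value_2 is None)
#   rename_in_dict({"tag": ["a", "b"]}, "tag", "a", "c")
#   # -> {"tag": ["b", "c"]}      (value renamed and re-sorted)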
def validate_csv_bulk_imports(  # noqa: C901
    csv_path: Path, note_paths: list
) -> dict[str, list[dict[str, str]]]:
    """Validate the bulk import CSV file.

    Args:
        csv_path (dict): Dictionary to validate
        note_paths (list): List of paths to all notes in vault

    Returns:
        dict: Validated dictionary
    """
    csv_dict: dict[str, Any] = {}
    with csv_path.expanduser().open("r") as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=",")
        row_num = 0
        for row in csv_reader:
            if row_num == 0:
                if "path" not in row:
                    raise typer.BadParameter("Missing 'path' column in CSV file")
                if "type" not in row:
                    raise typer.BadParameter("Missing 'type' column in CSV file")
                if "key" not in row:
                    raise typer.BadParameter("Missing 'key' column in CSV file")
                if "value" not in row:
                    raise typer.BadParameter("Missing 'value' column in CSV file")
            row_num += 1

            if row_num > 0 and row["type"] not in ["tag", "frontmatter", "inline_metadata"]:
                raise typer.BadParameter(
                    f"Invalid type '{row['type']}' in CSV file. Must be one of 'tag', 'frontmatter', 'inline_metadata'"
                )

            if row["path"] not in csv_dict:
                csv_dict[row["path"]] = []

            csv_dict[row["path"]].append(
                {"type": row["type"], "key": row["key"], "value": row["value"]}
            )

    if row_num in [0, 1]:
        raise typer.BadParameter("Empty CSV file")

    paths_to_remove = [x for x in csv_dict if x not in note_paths]

    for _path in paths_to_remove:
        raise typer.BadParameter(
            f"'{_path}' in CSV does not exist in vault. Ensure all paths are relative to the vault root."
        )

    return csv_dict
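# Illustrative CSV accepted by validate_csv_bulk_imports (paths are hypothetical):
#   path,type,key,value
#   folder/note1.md,frontmatter,status,done
#   folder/note1.md,tag,,my_tag
#   folder/note2.md,inline_metadata,area,work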
def version_callback(value: bool) -> None:
    """Print version and exit."""
    if value:
        console.print(f"{__package__.split('.')[0]}: v{__version__}")
        raise typer.Exit(0)
@@ -1,12 +1,10 @@
"""obsidian-metadata CLI."""

from pathlib import Path
from typing import Optional

import questionary
import typer
from rich import print

from obsidian_metadata._config import Config
from obsidian_metadata._utils import (
@@ -15,6 +13,7 @@ from obsidian_metadata._utils import (
    docstring_parameter,
    version_callback,
)
from obsidian_metadata._utils.console import console
from obsidian_metadata.models import Application

app = typer.Typer(add_completion=False, no_args_is_help=True, rich_markup_mode="rich")
@@ -28,16 +27,44 @@ HELP_TEXT = """
@app.command()
@docstring_parameter(__package__)
def main(
    vault_path: Path = typer.Option(
        None,
        help="Path to Obsidian vault",
        show_default=False,
    ),
    config_file: Path = typer.Option(
        Path(Path.home() / f".{__package__}.toml"),
        help="Specify a custom path to a configuration file",
        show_default=False,
    ),
    export_csv: Path = typer.Option(
        None,
        help="Exports all metadata to a specified CSV file and exits.",
        show_default=False,
        dir_okay=False,
        file_okay=True,
    ),
    export_json: Path = typer.Option(
        None,
        help="Exports all metadata to a specified JSON file and exits.",
        show_default=False,
        dir_okay=False,
        file_okay=True,
    ),
    export_template: Path = typer.Option(
        None,
        help="Exports all notes and their metadata to a specified CSV file and exits. Use to create a template for batch updates.",
        show_default=False,
        dir_okay=False,
        file_okay=True,
    ),
    import_csv: Path = typer.Option(
        None,
        help="Import a CSV file with bulk updates to metadata.",
        show_default=False,
        dir_okay=False,
        file_okay=True,
    ),
    vault_path: Path = typer.Option(
        None,
        help="Path to Obsidian vault",
        show_default=False,
    ),
    dry_run: bool = typer.Option(
        False,
        "--dry-run",
@@ -66,33 +93,20 @@ def main(
        help="""Set verbosity level (0=WARN, 1=INFO, 2=DEBUG, 3=TRACE)""",
        count=True,
    ),
    version: Optional[bool] = typer.Option(
    version: Optional[bool] = typer.Option(  # noqa: ARG001
        None, "--version", help="Print version and exit", callback=version_callback, is_eager=True
    ),
) -> None:
    r"""A script to make batch updates to metadata in an Obsidian vault.
    r"""Make batch updates to metadata in an Obsidian vault. No changes are made to the vault until they are explicitly committed.

    [bold] [/]
    [bold underline]Features:[/]

    - [code]in-text tags:[/] Delete every occurrence
    - [code]in-text tags:[/] Rename tag ([dim]#tag1[/] -> [dim]#tag2[/])
    - [code]frontmatter:[/] Delete a key matching a regex pattern and all associated values
    - [code]frontmatter:[/] Rename a key
    - [code]frontmatter:[/] Delete a value matching a regex pattern from a specified key
    - [code]frontmatter:[/] Rename a value from a specified key
    - [code]inline metadata:[/] Delete a key matching a regex pattern and all associated values
    - [code]inline metadata:[/] Rename a key
    - [code]inline metadata:[/] Delete a value matching a regex pattern from a specified key
    - [code]inline metadata:[/] Rename a value from a specified key
    - [code]vault:[/] Create a backup of the Obsidian vault.

    [bold underline]Usage:[/]
    [tan]Obsidian-metadata[/] allows you to make batch updates to metadata in an Obsidian vault. Once you have made your changes, review them prior to committing them to the vault. The script provides a menu of available actions. Make as many changes as you require and review them as you go. No changes are made to the vault until they are explicitly committed.

    [bold underline]It is strongly recommended that you back up your vault prior to committing changes.[/] This script makes changes directly to the markdown files in your vault. Once the changes are committed, there is no way to recreate the original information unless you have a backup. Follow the instructions in the script to create a backup of your vault if needed. The author of this script is not responsible for any data loss that may occur. Use at your own risk.

    [bold underline]Configuration:[/]
    Configuration is specified in a configuration file. On first run, this file will be created at [tan]~/.{0}.env[/]. Any options specified on the command line will override the configuration file.

    Full usage information is available at https://github.com/natelandau/obsidian-metadata
    """
    # Instantiate logger
    alerts.LoggerManager(  # pragma: no cover
@@ -113,12 +127,12 @@ def main(
       |_| |_|\___|\__\__,_|\__,_|\__,_|\__\__,_|
    """
    clear_screen()
    print(banner)
    console.print(banner)

    config: Config = Config(config_path=config_file, vault_path=vault_path)
    if len(config.vaults) == 0:
        typer.echo("No vaults configured. Exiting.")
        raise typer.Exit(1)
        raise typer.BadParameter("No vaults configured. Exiting.")

    if len(config.vaults) == 1:
        application = Application(dry_run=dry_run, config=config.vaults[0])
@@ -134,7 +148,24 @@ def main(
        vault_to_use = next(vault for vault in config.vaults if vault.name == vault_name)
        application = Application(dry_run=dry_run, config=vault_to_use)

    application.main_app()
    if export_json is not None:
        path = Path(export_json).expanduser().resolve()
        application.noninteractive_export_json(path)
        raise typer.Exit(code=0)
    if export_csv is not None:
        path = Path(export_csv).expanduser().resolve()
        application.noninteractive_export_csv(path)
        raise typer.Exit(code=0)
    if export_template is not None:
        path = Path(export_template).expanduser().resolve()
        application.noninteractive_export_template(path)
        raise typer.Exit(code=0)
    if import_csv is not None:
        path = Path(import_csv).expanduser().resolve()
        application.noninteractive_bulk_import(path)
        raise typer.Exit(code=0)

    application.application_main()


if __name__ == "__main__":
@@ -1,24 +1,24 @@
"""Shared models."""
from obsidian_metadata.models.patterns import Patterns  # isort: skip
from obsidian_metadata.models.metadata import (
    Frontmatter,
    InlineMetadata,
    InlineTags,
    VaultMetadata,
from obsidian_metadata.models.enums import (
    InsertLocation,
    MetadataType,
    Wrapping,
)
from obsidian_metadata.models.metadata import InlineField, dict_to_yaml
from obsidian_metadata.models.notes import Note
from obsidian_metadata.models.vault import Vault
from obsidian_metadata.models.vault import Vault, VaultFilter

from obsidian_metadata.models.application import Application  # isort: skip

__all__ = [
    "Frontmatter",
    "InlineMetadata",
    "InlineTags",
    "LoggerManager",
    "Note",
    "Patterns",
    "Application",
    "dict_to_yaml",
    "InlineField",
    "InsertLocation",
    "LoggerManager",
    "MetadataType",
    "Note",
    "Vault",
    "VaultMetadata",
    "VaultFilter",
    "Wrapping",
]
@@ -1,19 +1,21 @@
|
||||
"""Questions for the cli."""
|
||||
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import questionary
|
||||
from rich import print
|
||||
import typer
|
||||
from rich import box
|
||||
from rich.table import Table
|
||||
|
||||
from obsidian_metadata._config import VaultConfig
|
||||
from obsidian_metadata._utils.alerts import logger as log
|
||||
from obsidian_metadata.models import Patterns, Vault
|
||||
from obsidian_metadata._utils import alerts
|
||||
from obsidian_metadata._utils import alerts, validate_csv_bulk_imports
|
||||
from obsidian_metadata._utils.console import console
|
||||
from obsidian_metadata.models import InsertLocation, Vault, VaultFilter
|
||||
from obsidian_metadata.models.enums import MetadataType
|
||||
from obsidian_metadata.models.questions import Questions
|
||||
|
||||
PATTERNS = Patterns()
|
||||
|
||||
|
||||
class Application:
|
||||
"""Questions for use in the cli.
|
||||
@@ -27,117 +29,467 @@ class Application:
|
||||
self.config = config
|
||||
self.dry_run = dry_run
|
||||
self.questions = Questions()
|
||||
self.filters: list[VaultFilter] = []
|
||||
|
||||
def load_vault(self, path_filter: str = None) -> None:
|
||||
"""Load the vault.
|
||||
def _load_vault(self) -> None:
|
||||
"""Load the vault."""
|
||||
if len(self.filters) == 0:
|
||||
self.vault: Vault = Vault(config=self.config, dry_run=self.dry_run)
|
||||
else:
|
||||
self.vault = Vault(config=self.config, dry_run=self.dry_run, filters=self.filters)
|
||||
|
||||
Args:
|
||||
path_filter (str, optional): Regex to filter notes by path.
|
||||
"""
|
||||
self.vault: Vault = Vault(config=self.config, dry_run=self.dry_run, path_filter=path_filter)
|
||||
log.info(f"Indexed {self.vault.num_notes()} notes from {self.vault.vault_path}")
|
||||
alerts.success(
|
||||
f"Loaded {len(self.vault.notes_in_scope)} notes from {len(self.vault.all_notes)} total notes"
|
||||
)
|
||||
self.questions = Questions(vault=self.vault)
|
||||
|
||||
def main_app(self) -> None:
|
||||
def application_main(self) -> None:
|
||||
"""Questions for the main application."""
|
||||
self.load_vault()
|
||||
self._load_vault()
|
||||
|
||||
while True:
|
||||
print("\n")
|
||||
self.vault.info()
|
||||
|
||||
match self.questions.ask_main_application(): # noqa: E999
|
||||
case None:
|
||||
break
|
||||
match self.questions.ask_application_main():
|
||||
case "vault_actions":
|
||||
self.application_vault()
|
||||
case "export_metadata":
|
||||
self.application_export_metadata()
|
||||
case "inspect_metadata":
|
||||
self.application_inspect_metadata()
|
||||
case "import_from_csv":
|
||||
self.application_import_csv()
|
||||
case "filter_notes":
|
||||
self.load_vault(path_filter=self.questions.ask_for_filter_path())
|
||||
self.application_filter()
|
||||
case "add_metadata":
|
||||
self.application_add_metadata()
|
||||
case "rename_metadata":
|
||||
self.application_rename_metadata()
|
||||
case "delete_metadata":
|
||||
self.application_delete_metadata()
|
||||
case "reorganize_metadata":
|
||||
self.application_reorganize_metadata()
|
||||
case "review_changes":
|
||||
self.review_changes()
|
||||
case "commit_changes":
|
||||
self.commit_changes()
|
||||
case _:
|
||||
break
|
||||
|
||||
console.print("Done!")
|
||||
|
||||
def application_add_metadata(self) -> None:
|
||||
"""Add metadata."""
|
||||
alerts.usage(
|
||||
"Add new metadata to your vault. Currently only supports adding to the frontmatter of a note."
|
||||
)
|
||||
|
||||
meta_type = self.questions.ask_meta_type()
|
||||
match meta_type:
|
||||
case MetadataType.FRONTMATTER | MetadataType.INLINE:
|
||||
key = self.questions.ask_new_key(question="Enter the key for the new metadata")
|
||||
if key is None: # pragma: no cover
|
||||
return
|
||||
|
||||
value = self.questions.ask_new_value(
|
||||
question="Enter the value for the new metadata"
|
||||
)
|
||||
if value is None: # pragma: no cover
|
||||
return
|
||||
|
||||
num_changed = self.vault.add_metadata(
|
||||
meta_type=meta_type, key=key, value=value, location=self.vault.insert_location
|
||||
)
|
||||
if num_changed == 0: # pragma: no cover
|
||||
alerts.warning("No notes were changed")
|
||||
return
|
||||
|
||||
alerts.success(f"Added metadata to {num_changed} notes")
|
||||
|
||||
case MetadataType.TAGS:
|
||||
tag = self.questions.ask_new_tag()
|
||||
if tag is None: # pragma: no cover
|
||||
return
|
||||
|
||||
num_changed = self.vault.add_metadata(
|
||||
meta_type=meta_type, value=tag, location=self.vault.insert_location
|
||||
)
|
||||
|
||||
if num_changed == 0: # pragma: no cover
|
||||
alerts.warning("No notes were changed")
|
||||
return
|
||||
|
||||
alerts.success(f"Added metadata to {num_changed} notes")
|
||||
case _: # pragma: no cover
|
||||
return
|
||||
|
||||
    def application_delete_metadata(self) -> None:
        """Delete metadata."""
        alerts.usage("Delete either a key and all associated values, or a specific value.")

        choices = [
            questionary.Separator(),
            {"name": "Delete inline tag", "value": "delete_tag"},
            {"name": "Delete key", "value": "delete_key"},
            {"name": "Delete value", "value": "delete_value"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        match self.questions.ask_selection(
            choices=choices, question="Select a metadata type to delete"
        ):
            case "delete_key":
                self.delete_key()
            case "delete_value":
                self.delete_value()
            case "delete_tag":
                self.delete_tag()
            case _:  # pragma: no cover
                return

    def application_rename_metadata(self) -> None:
        """Rename metadata."""
        alerts.usage("Select the type of metadata to rename.")

        choices = [
            questionary.Separator(),
            {"name": "Rename inline tag", "value": "rename_tag"},
            {"name": "Rename key", "value": "rename_key"},
            {"name": "Rename value", "value": "rename_value"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        match self.questions.ask_selection(
            choices=choices, question="Select a metadata type to rename"
        ):
            case "rename_key":
                self.rename_key()
            case "rename_value":
                self.rename_value()
            case "rename_tag":
                self.rename_tag()
            case _:  # pragma: no cover
                return

    def application_filter(self) -> None:  # noqa: C901,PLR0911,PLR0912
        """Filter notes."""
        alerts.usage("Limit the scope of notes to be processed with one or more filters.")

        choices = [
            questionary.Separator(),
            {"name": "Apply new regex path filter", "value": "apply_path_filter"},
            {"name": "Apply new metadata filter", "value": "apply_metadata_filter"},
            {"name": "Apply new in-text tag filter", "value": "apply_tag_filter"},
            {"name": "List and clear filters", "value": "list_filters"},
            {"name": "List notes in scope", "value": "list_notes"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        while True:
            match self.questions.ask_selection(choices=choices, question="Select an action"):
                case "apply_path_filter":
                    path = self.questions.ask_filter_path()
                    if path is None or not path:  # pragma: no cover
                        return

                    self.filters.append(VaultFilter(path_filter=path))
                    self._load_vault()

                case "apply_metadata_filter":
                    key = self.questions.ask_existing_key()
                    if key is None:  # pragma: no cover
                        return

                    questions2 = Questions(vault=self.vault, key=key)
                    value = questions2.ask_existing_value(
                        question="Enter the value for the metadata filter",
                    )
                    if value is None:  # pragma: no cover
                        return
                    if not value:
                        self.filters.append(VaultFilter(key_filter=key))
                    else:
                        self.filters.append(VaultFilter(key_filter=key, value_filter=value))
                    self._load_vault()

                case "apply_tag_filter":
                    tag = self.questions.ask_existing_tag()
                    if tag is None or not tag:
                        return

                    self.filters.append(VaultFilter(tag_filter=tag))
                    self._load_vault()

                case "list_filters":
                    if len(self.filters) == 0:
                        alerts.notice("No filters have been applied")
                        return

                    console.print("")
                    table = Table(
                        "Opt",
                        "Filter",
                        "Type",
                        title="Current Filters",
                        show_header=False,
                        box=box.HORIZONTALS,
                    )
                    for _n, _filter in enumerate(self.filters, start=1):
                        if _filter.path_filter is not None:
                            table.add_row(
                                str(_n),
                                f"Path regex: [tan bold]{_filter.path_filter}",
                                end_section=bool(_n == len(self.filters)),
                            )
                        elif _filter.tag_filter is not None:
                            table.add_row(
                                str(_n),
                                f"Tag filter: [tan bold]{_filter.tag_filter}",
                                end_section=bool(_n == len(self.filters)),
                            )
                        elif _filter.key_filter is not None and _filter.value_filter is None:
                            table.add_row(
                                str(_n),
                                f"Key filter: [tan bold]{_filter.key_filter}",
                                end_section=bool(_n == len(self.filters)),
                            )
                        elif _filter.key_filter is not None and _filter.value_filter is not None:
                            table.add_row(
                                str(_n),
                                f"Key/Value : [tan bold]{_filter.key_filter}={_filter.value_filter}",
                                end_section=bool(_n == len(self.filters)),
                            )
                    table.add_row(f"{len(self.filters) + 1}", "Clear All")
                    table.add_row(f"{len(self.filters) + 2}", "Return to Main Menu")
                    console.print(table)

                    num = self.questions.ask_number(
                        question="Enter the number of the filter to clear"
                    )
                    if num is None:
                        return
                    if int(num) <= len(self.filters):
                        self.filters.pop(int(num) - 1)
                        self._load_vault()
                        return
                    if int(num) == len(self.filters) + 1:
                        self.filters = []
                        self._load_vault()
                        return

                case "list_notes":
                    self.vault.list_editable_notes()

                case _:
                    return

    def application_import_csv(self) -> None:
        """Import CSV for bulk changes to metadata."""
        alerts.usage(
            "Import CSV to make bulk changes to metadata. The CSV must have the following columns: path, type, key, value. Where type is one of 'frontmatter', 'inline_metadata', or 'tag'. Note: this will not create new notes."
        )

        path = self.questions.ask_path(question="Enter path to a CSV file", valid_file=True)

        if path is None:
            return

        csv_path = Path(path).expanduser()

        if "csv" not in csv_path.suffix.lower():
            alerts.error("File must be a CSV file")
            return

        note_paths = [
            str(n.note_path.relative_to(self.vault.vault_path)) for n in self.vault.all_notes
        ]

        dict_from_csv = validate_csv_bulk_imports(csv_path, note_paths)
        num_changed = self.vault.update_from_dict(dict_from_csv)

        if num_changed == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(f"Rewrote metadata for {num_changed} notes.")

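    # Illustrative sketch (assumption, not part of the diff): building a minimal
    # CSV that the bulk import above accepts. Columns follow the usage text
    # (path, type, key, value); the note path and values are hypothetical.
    #
    #   import csv
    #
    #   with open("bulk_import.csv", "w", newline="") as f:
    #       writer = csv.writer(f)
    #       writer.writerow(["path", "type", "key", "value"])
    #       writer.writerow(["folder/note.md", "frontmatter", "status", "draft"])
    #       writer.writerow(["folder/note.md", "tag", "", "project/alpha"])
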
    def application_export_metadata(self) -> None:
        """Export metadata to various formats."""
        alerts.usage(
            "Export the metadata in your vault. Note, uncommitted changes will be reflected in these files. The notes CSV export can be used as a template for importing bulk changes."
        )
        choices = [
            questionary.Separator(),
            {"name": "Metadata by type to CSV", "value": "export_csv"},
            {"name": "Metadata by type to JSON", "value": "export_json"},
            {
                "name": "Metadata by note to CSV [Bulk import template]",
                "value": "export_notes_csv",
            },
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        while True:
            match self.questions.ask_selection(choices=choices, question="Export format"):
                case "export_csv":
                    path = self.questions.ask_path(question="Enter a path for the CSV file")
                    if path is None:
                        return
                    self.vault.export_metadata(path=path, export_format="csv")
                    alerts.success(f"CSV written to {path}")
                case "export_json":
                    path = self.questions.ask_path(question="Enter a path for the JSON file")
                    if path is None:
                        return
                    self.vault.export_metadata(path=path, export_format="json")
                    alerts.success(f"JSON written to {path}")
                case "export_notes_csv":
                    path = self.questions.ask_path(question="Enter a path for the CSV file")
                    if path is None:
                        return
                    self.vault.export_notes_to_csv(path=path)
                    alerts.success(f"CSV written to {path}")
                    return
                case _:
                    return

    def application_inspect_metadata(self) -> None:
        """View metadata."""
        alerts.usage(
            "Inspect the metadata in your vault. Note, uncommitted changes will be reflected in these reports."
        )

        choices = [
            questionary.Separator(),
            {"name": "View all frontmatter", "value": "all_frontmatter"},
            {"name": "View all inline metadata", "value": "all_inline"},
            {"name": "View all inline tags", "value": "all_tags"},
            {"name": "View all keys", "value": "all_keys"},
            {"name": "View all metadata", "value": "all_metadata"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        while True:
            match self.questions.ask_selection(choices=choices, question="Select an action"):
                case "all_metadata":
                    console.print("")
                    self.vault.print_metadata(meta_type=MetadataType.ALL)
                    console.print("")
                case "all_frontmatter":
                    console.print("")
                    self.vault.print_metadata(meta_type=MetadataType.FRONTMATTER)
                    console.print("")
                case "all_inline":
                    console.print("")
                    self.vault.print_metadata(meta_type=MetadataType.INLINE)
                    console.print("")
                case "all_keys":
                    console.print("")
                    self.vault.print_metadata(meta_type=MetadataType.KEYS)
                    console.print("")
                case "all_tags":
                    console.print("")
                    self.vault.print_metadata(meta_type=MetadataType.TAGS)
                    console.print("")
                case _:
                    return

    def application_reorganize_metadata(self) -> None:
        """Reorganize metadata.

        This portion of the application deals with moving metadata between types (inline to frontmatter, etc.) and moving the location of inline metadata within a note.
        """
        alerts.usage("Move metadata within notes.")
        alerts.usage("  1. Transpose frontmatter to inline or vice versa.")
        alerts.usage("  2. Move the location of inline metadata within a note.")

        choices = [
            questionary.Separator(),
            {"name": "Move inline metadata to top of note", "value": "move_to_top"},
            {
                "name": "Move inline metadata beneath the first header",
                "value": "move_to_after_header",
            },
            {"name": "Move inline metadata to bottom of the note", "value": "move_to_bottom"},
            {"name": "Transpose frontmatter to inline", "value": "frontmatter_to_inline"},
            {"name": "Transpose inline to frontmatter", "value": "inline_to_frontmatter"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        match self.questions.ask_selection(
            choices=choices, question="Select metadata to transpose"
        ):
            case "frontmatter_to_inline":
                self.transpose_metadata(begin=MetadataType.FRONTMATTER, end=MetadataType.INLINE)
            case "inline_to_frontmatter":
                self.transpose_metadata(begin=MetadataType.INLINE, end=MetadataType.FRONTMATTER)
            case "move_to_top":
                self.move_inline_metadata(location=InsertLocation.TOP)
            case "move_to_after_header":
                self.move_inline_metadata(location=InsertLocation.AFTER_TITLE)
            case "move_to_bottom":
                self.move_inline_metadata(location=InsertLocation.BOTTOM)
            case _:  # pragma: no cover
                return

    def application_vault(self) -> None:
        """Vault actions."""
        alerts.usage("Create or delete a backup of your vault.")

        choices = [
            questionary.Separator(),
            {"name": "Backup vault", "value": "backup_vault"},
            {"name": "Delete vault backup", "value": "delete_backup"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]

        while True:
            match self.questions.ask_selection(choices=choices, question="Select a vault action"):
                case "backup_vault":
                    self.vault.backup()
                case "delete_backup":
                    self.vault.delete_backup()
                case _:
                    return

    def commit_changes(self) -> bool:
        """Write all changes to disk.

        Returns:
            True if changes were committed, False otherwise.
        """
        changed_notes = self.vault.get_changed_notes()

        if len(changed_notes) == 0:
            console.print("\n")
            alerts.notice("No changes to commit.\n")
            return False

        backup = questionary.confirm("Create backup before committing changes").ask()
        if backup is None:
            return False
        if backup:
            self.vault.backup()

        if questionary.confirm(f"Commit {len(changed_notes)} changed files to disk?").ask():
            self.vault.commit_changes()
            if not self.dry_run:
                alerts.success(f"{len(changed_notes)} changes committed to disk. Exiting")
                raise typer.Exit(0)
            return True

        return False

    def delete_tag(self) -> None:
        """Delete an inline tag."""
        tag = self.questions.ask_existing_tag(question="Which tag would you like to delete?")

        num_changed = self.vault.delete_tag(tag)
        if num_changed == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(f"Deleted inline tag: {tag} in {num_changed} notes")

@@ -145,15 +497,17 @@ class Application:

    def delete_key(self) -> None:
        """Delete a key from the vault."""
        key_to_delete = self.questions.ask_existing_keys_regex(
            question="Regex for the key(s) you'd like to delete?"
        )
        if key_to_delete is None:  # pragma: no cover
            return

        num_changed = self.vault.delete_metadata(
            key=key_to_delete, meta_type=MetadataType.ALL, is_regex=True
        )
        if num_changed == 0:
            alerts.warning(f"No notes found with a key matching regex: [reverse]{key_to_delete}[/]")
            return

        alerts.success(
@@ -162,46 +516,20 @@ class Application:

        return

    def delete_value(self) -> None:
        """Delete a value from the vault."""
        key = self.questions.ask_existing_key(question="Which key contains the value to delete?")
        if key is None:  # pragma: no cover
            return

        questions2 = Questions(vault=self.vault, key=key)
        value = questions2.ask_existing_value_regex(question="Regex for the value to delete")
        if value is None:  # pragma: no cover
            return

        num_changed = self.vault.delete_metadata(
            key=key, value=value, meta_type=MetadataType.ALL, is_regex=True
        )
        if num_changed == 0:
            alerts.warning(f"No notes found matching: {key}: {value}")
            return

@@ -212,6 +540,135 @@ class Application:

        return

    def move_inline_metadata(self, location: InsertLocation) -> None:
        """Move inline metadata to the selected location."""
        num_changed = self.vault.move_inline_metadata(location)
        if num_changed == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(f"Moved inline metadata to {location.value} in {num_changed} notes")

    def noninteractive_bulk_import(self, path: Path) -> None:
        """Bulk update metadata from a CSV from the command line.

        Args:
            path: Path to the CSV file containing the metadata to update.
        """
        self._load_vault()
        note_paths = [
            str(n.note_path.relative_to(self.vault.vault_path)) for n in self.vault.all_notes
        ]
        dict_from_csv = validate_csv_bulk_imports(path, note_paths)
        num_changed = self.vault.update_from_dict(dict_from_csv)
        if num_changed == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(f"{num_changed} notes specified in '{path}'")
        alerts.info("Review changes and commit.")
        while True:
            self.vault.info()

            match self.questions.ask_application_main():
                case "vault_actions":
                    self.application_vault()
                case "inspect_metadata":
                    self.application_inspect_metadata()
                case "review_changes":
                    self.review_changes()
                case "commit_changes":
                    self.commit_changes()
                case _:
                    break

        console.print("Done!")

    def noninteractive_export_csv(self, path: Path) -> None:
        """Export the vault metadata to CSV."""
        self._load_vault()
        self.vault.export_metadata(export_format="csv", path=str(path))
        alerts.success(f"Exported metadata to {path}")

    def noninteractive_export_json(self, path: Path) -> None:
        """Export the vault metadata to JSON."""
        self._load_vault()
        self.vault.export_metadata(export_format="json", path=str(path))
        alerts.success(f"Exported metadata to {path}")

    def noninteractive_export_template(self, path: Path) -> None:
        """Export the vault metadata to CSV."""
        self._load_vault()
        with console.status(
            "Preparing export... [dim](Can take a while for large vaults)[/]",
            spinner="bouncingBall",
        ):
            self.vault.export_notes_to_csv(path=str(path))
        alerts.success(f"Exported metadata to {path}")

    def rename_key(self) -> None:
        """Rename a key in the vault."""
        original_key = self.questions.ask_existing_key(
            question="Which key would you like to rename?"
        )
        if original_key is None:  # pragma: no cover
            return

        new_key = self.questions.ask_new_key()
        if new_key is None:  # pragma: no cover
            return

        num_changed = self.vault.rename_metadata(original_key, new_key)
        if num_changed == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(
            f"Renamed [reverse]{original_key}[/] to [reverse]{new_key}[/] in {num_changed} notes"
        )

    def rename_tag(self) -> None:
        """Rename an inline tag."""
        original_tag = self.questions.ask_existing_tag(question="Which tag to rename?")
        if original_tag is None:  # pragma: no cover
            return

        new_tag = self.questions.ask_new_tag("New tag")
        if new_tag is None:  # pragma: no cover
            return

        num_changed = self.vault.rename_tag(original_tag, new_tag)
        if num_changed == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(
            f"Renamed [reverse]{original_tag}[/] to [reverse]{new_tag}[/] in {num_changed} notes"
        )

    def rename_value(self) -> None:
        """Rename a value in the vault."""
        key = self.questions.ask_existing_key(question="Which key contains the value to rename?")
        if key is None:  # pragma: no cover
            return

        question_key = Questions(vault=self.vault, key=key)
        value = question_key.ask_existing_value(question="Which value would you like to rename?")
        if value is None:  # pragma: no cover
            return

        new_value = question_key.ask_new_value()
        if new_value is None:  # pragma: no cover
            return

        num_changes = self.vault.rename_metadata(key, value, new_value)
        if num_changes == 0:
            alerts.warning("No notes were changed")
            return

        alerts.success(f"Renamed '{key}:{value}' to '{key}:{new_value}' in {num_changes} notes")

    def review_changes(self) -> None:
        """Review all changes in the vault."""
        changed_notes = self.vault.get_changed_notes()
@@ -220,14 +677,9 @@ class Application:

            alerts.info("No changes to review.")
            return

        alerts.info(f"Found {len(changed_notes)} changed notes in the vault")
        choices: list[dict[str, Any] | questionary.Separator] = []
        choices.append(questionary.Separator())
        for n, note in enumerate(changed_notes, start=1):
            _selection = {
                "name": f"{n}: {note.note_path.relative_to(self.vault.vault_path)}",
@@ -239,37 +691,84 @@ class Application:

        choices.append({"name": "Return", "value": "return"})

        while True:
            note_to_review = self.questions.ask_selection(
                choices=choices,
                question="Select an updated note to view the diff",
            )
            if note_to_review is None or note_to_review == "return":
                break
            changed_notes[note_to_review].print_diff()

    def transpose_metadata(self, begin: MetadataType, end: MetadataType) -> None:  # noqa: PLR0911
        """Transpose metadata from one format to another.

        Args:
            begin: The format to transpose from.
            end: The format to transpose to.
        """
        choices = [
            {"name": f"Transpose all {begin.value} to {end.value}", "value": "transpose_all"},
            {"name": "Transpose a key", "value": "transpose_key"},
            {"name": "Transpose a value", "value": "transpose_value"},
            questionary.Separator(),
            {"name": "Back", "value": "back"},
        ]
        match self.questions.ask_selection(choices=choices, question="Select an action to perform"):
            case "transpose_all":
                num_changed = self.vault.transpose_metadata(
                    begin=begin,
                    end=end,
                    location=self.vault.insert_location,
                )

                if num_changed == 0:
                    alerts.warning("No notes were changed")
                    return

                alerts.success(f"Transposed {begin.value} to {end.value} in {num_changed} notes")
            case "transpose_key":
                key = self.questions.ask_existing_key(question="Which key to transpose?")
                if key is None:  # pragma: no cover
                    return

                num_changed = self.vault.transpose_metadata(
                    begin=begin,
                    end=end,
                    key=key,
                    location=self.vault.insert_location,
                )

                if num_changed == 0:
                    alerts.warning("No notes were changed")
                    return

                alerts.success(
                    f"Transposed key: `{key}` from {begin.value} to {end.value} in {num_changed} notes"
                )
            case "transpose_value":
                key = self.questions.ask_existing_key(question="Which key contains the value?")
                if key is None:  # pragma: no cover
                    return

                questions2 = Questions(vault=self.vault, key=key)
                value = questions2.ask_existing_value(question="Which value to transpose?")
                if value is None:  # pragma: no cover
                    return

                num_changed = self.vault.transpose_metadata(
                    begin=begin,
                    end=end,
                    key=key,
                    value=value,
                    location=self.vault.insert_location,
                )

                if num_changed == 0:
                    alerts.warning("No notes were changed")
                    return

                alerts.success(
                    f"Transposed key: `{key}:{value}` from {begin.value} to {end.value} in {num_changed} notes"
                )
            case _:
                return

src/obsidian_metadata/models/enums.py (new file, 36 lines)
@@ -0,0 +1,36 @@

"""Enum classes for the obsidian_metadata package."""
|
||||
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class InsertLocation(Enum):
|
||||
"""Location to add metadata to notes.
|
||||
|
||||
TOP: Directly after frontmatter.
|
||||
AFTER_TITLE: After a header following frontmatter.
|
||||
BOTTOM: The bottom of the note
|
||||
|
||||
"""
|
||||
|
||||
TOP = "Top"
|
||||
AFTER_TITLE = "After title"
|
||||
BOTTOM = "Bottom"
|
||||
|
||||
|
||||
class MetadataType(Enum):
|
||||
"""Enum class for the type of metadata."""
|
||||
|
||||
ALL = "Inline, Frontmatter, and Tags"
|
||||
FRONTMATTER = "Frontmatter"
|
||||
INLINE = "Inline Metadata"
|
||||
KEYS = "Metadata Keys Only"
|
||||
META = "Inline and Frontmatter. No Tags"
|
||||
TAGS = "Inline Tags"
|
||||
|
||||
|
||||
class Wrapping(Enum):
|
||||
"""Wrapping for inline metadata within a block of text."""
|
||||
|
||||
BRACKETS = "Brackets"
|
||||
PARENS = "Parentheses"
|
||||
NONE = None
|
||||
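# Illustrative sketch (not part of the diff): how these enum values read at
# call sites; the asserts simply restate the definitions above.
#
#   from obsidian_metadata.models.enums import InsertLocation, MetadataType, Wrapping
#
#   assert MetadataType.FRONTMATTER.value == "Frontmatter"
#   assert InsertLocation.AFTER_TITLE.value == "After title"
#   assert Wrapping.NONE.value is None
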
src/obsidian_metadata/models/exceptions.py (new file, 17 lines)
@@ -0,0 +1,17 @@

"""Custom exceptions for the obsidian_metadata package."""
|
||||
|
||||
|
||||
class ObsidianMetadataError(Exception):
|
||||
"""Base exception for the obsidian_metadata package."""
|
||||
|
||||
|
||||
class FrontmatterError(ObsidianMetadataError):
|
||||
"""Exception for errors in the frontmatter."""
|
||||
|
||||
|
||||
class InlineMetadataError(ObsidianMetadataError):
|
||||
"""Exception for errors in the inlined metadata."""
|
||||
|
||||
|
||||
class InlineTagError(ObsidianMetadataError):
|
||||
"""Exception for errors in the inline tags."""
|
||||
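# Illustrative sketch (not part of the diff): callers can catch the base class
# to handle any of the package's metadata errors.
#
#   try:
#       raise FrontmatterError("malformed YAML frontmatter")
#   except ObsidianMetadataError as e:
#       print(e)
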
@@ -1,505 +1,138 @@

"""Work with metadata items."""

import re
from io import StringIO

import rich.repr
from ruamel.yaml import YAML

from obsidian_metadata.models.enums import MetadataType, Wrapping


def dict_to_yaml(dictionary: dict[str, list[str]], sort_keys: bool = False) -> str:
    """Return a dictionary of {key: [values]} as a YAML string.

    Args:
        dictionary (dict[str, list[str]]): Dictionary of {key: [values]}.
        sort_keys (bool, optional): Sort the keys. Defaults to False.

    Returns:
        str: Frontmatter as a YAML string.
    """
    if sort_keys:
        dictionary = dict(sorted(dictionary.items()))

    for key, value in dictionary.items():
        if len(value) == 1:
            dictionary[key] = value[0]  # type: ignore [assignment]

    yaml = YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)
    string_stream = StringIO()
    yaml.dump(dictionary, string_stream)
    yaml_value = string_stream.getvalue()
    string_stream.close()
    if yaml_value == "{}\n":
        return ""
    return yaml_value


@rich.repr.auto
class InlineField:
    """Representation of a single inline field.

    Attributes:
        meta_type (MetadataType): Metadata category.
        clean_key (str): Cleaned key - Key without surrounding markdown
        key (str): Metadata key - Complete key found in note
        key_close (str): Closing key markdown.
        key_open (str): Opening key markdown.
        normalized_key (str): Key converted to lowercase w. spaces replaced with dashes
        normalized_value (str): Value stripped of leading and trailing whitespace.
        value (str): Metadata value - Complete value found in note.
        wrapping (Wrapping): Inline metadata may be wrapped with [] or ().
    """

    def __init__(
        self,
        meta_type: MetadataType,
        key: str,
        value: str,
        wrapping: Wrapping = Wrapping.NONE,
        is_changed: bool = False,
    ) -> None:
        self.meta_type = meta_type
        self.key = key
        self.value = value
        self.wrapping = wrapping
        self.is_changed = is_changed

        # Clean keys of surrounding markdown and convert to lowercase
        self.clean_key, self.normalized_key, self.key_open, self.key_close = (
            self._clean_key(self.key) if self.key else (None, None, "", "")
        )

        # Normalize value for display
        self.normalized_value = "-" if re.match(r"^\s*$", self.value) else self.value.strip()

    def __rich_repr__(self) -> rich.repr.Result:  # pragma: no cover
        """Rich representation of the inline field."""
        yield "clean_key", self.clean_key
        yield "is_changed", self.is_changed
        yield "key_close", self.key_close
        yield "key_open", self.key_open
        yield "key", self.key
        yield "meta_type", self.meta_type.value
        yield "normalized_key", self.normalized_key
        yield "normalized_value", self.normalized_value
        yield "value", self.value
        yield "wrapping", self.wrapping.value

    def __eq__(self, other: object) -> bool:
        """Compare two InlineField objects."""
        if not isinstance(other, InlineField):
            return NotImplemented
        return (
            self.key == other.key
            and self.value == other.value
            and self.meta_type == other.meta_type
        )

    def __hash__(self) -> int:
        """Hash the InlineField object."""
        return hash((self.key, self.value, self.meta_type))

    def _clean_key(self, text: str) -> tuple[str, str, str, str]:
        """Remove markdown from the key.

        Creates the following attributes:

            clean_key     : The key stripped of opening and closing markdown
            normalized_key: The key converted to lowercase with spaces replaced with dashes
            key_open      : The opening markdown
            key_close     : The closing markdown

        Args:
            text (str): Key to clean.

        Returns:
            tuple[str, str, str, str]: Cleaned key, normalized key, opening markdown, closing markdown.
        """
        cleaned = text
        if tmp := re.search(r"^([\*#_ `~]+)", text):
            key_open = tmp.group(0)
            cleaned = re.sub(rf"^{re.escape(key_open)}", "", text)
        else:
            key_open = ""

        if tmp := re.search(r"([\*#_ `~]+)$", text):
            key_close = tmp.group(0)
            cleaned = re.sub(rf"{re.escape(key_close)}$", "", cleaned)
        else:
            key_close = ""

        normalized = cleaned.replace(" ", "-").lower()

        return cleaned, normalized, key_open, key_close

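# Illustrative sketch (not part of the diff): exercising the two pieces above
# with made-up data. dict_to_yaml collapses single-item lists to scalars, and
# InlineField strips surrounding markdown from keys.
#
#   print(dict_to_yaml({"status": ["draft"], "tags": ["one", "two"]}))
#   # status: draft
#   # tags:
#   #   - one
#   #   - two
#
#   field = InlineField(meta_type=MetadataType.INLINE, key="**area**", value=" office")
#   assert (field.clean_key, field.normalized_key) == ("area", "area")
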
(File diff suppressed because it is too large)

src/obsidian_metadata/models/parsers.py (new file, 194 lines)
@@ -0,0 +1,194 @@

"""Parsers for Obsidian metadata files."""
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
import emoji
|
||||
import regex as re
|
||||
|
||||
from obsidian_metadata.models.enums import Wrapping
|
||||
|
||||
|
||||
@dataclass
|
||||
class Parser:
|
||||
"""Regex parsers for Obsidian metadata files.
|
||||
|
||||
All methods return a list of matches
|
||||
"""
|
||||
|
||||
# Reusable regex patterns
|
||||
internal_link = r"\[\[[^\[\]]*?\]\]" # An Obsidian link of the form [[<link>]]
|
||||
chars_not_in_tags = r"\u2000-\u206F\u2E00-\u2E7F'!\"#\$%&\(\)\*+,\.:;<=>?@\^`\{\|\}~\[\]\\\s"
|
||||
|
||||
# Compiled regex patterns
|
||||
tag = re.compile(
|
||||
r"""
|
||||
(?:
|
||||
(?:^|\s|\\{2}) # If tarts with newline, space, or "\\""
|
||||
(?P<tag>\#[^\u2000-\u206F\u2E00-\u2E7F'!\"\#\$%&\(\)\*+,\.:;<=>?@\^`\{\|\}~\[\]\\\s]+) # capture tag
|
||||
| # Else
|
||||
(?:(?<=
|
||||
\#[^\u2000-\u206F\u2E00-\u2E7F'!\"\#\$%&\(\)\*+,\.:;<=>?@\^`\{\|\}~\[\]\\\s]+
|
||||
)) # if lookbehind is a tag
|
||||
(?P<tag>\#[^\u2000-\u206F\u2E00-\u2E7F'!\"\#\$%&\(\)\*+,\.:;<=>?@\^`\{\|\}~\[\]\\\s]+) # capture tag
|
||||
| # Else
|
||||
(*FAIL)
|
||||
)
|
||||
""",
|
||||
re.X,
|
||||
)
|
||||
frontmatter_complete = re.compile(r"^\s*(?P<frontmatter>---.*?---)", flags=re.DOTALL)
|
||||
frontmatter_data = re.compile(
|
||||
r"(?P<open>^\s*---)(?P<frontmatter>.*?)(?P<close>---)", flags=re.DOTALL
|
||||
)
|
||||
code_block = re.compile(r"```.*?```", flags=re.DOTALL)
|
||||
inline_code = re.compile(r"(?<!`{2})`[^`]+?` ?")
|
||||
inline_metadata = re.compile(
|
||||
r"""
|
||||
(?: # Conditional
|
||||
(?= # If opening wrapper is a bracket or parenthesis
|
||||
(
|
||||
(?<!\[)\[(?!\[) # Single bracket
|
||||
| # Or
|
||||
(?<!\()\((?!\() # Single parenthesis
|
||||
)
|
||||
)
|
||||
(?: # Conditional
|
||||
(?= # If opening wrapper is a bracket
|
||||
(?<!\[)\[(?!\[) # Single bracket
|
||||
)
|
||||
(?<!\[)(?P<open>\[)(?!\[) # Open bracket
|
||||
(?P<key>[0-9\p{Letter}\w\s_/-;\*\~`]+?) # Find key
|
||||
(?<!:)::(?!:) # Separator
|
||||
(?P<value>.*?) # Value
|
||||
(?<!\])(?P<close>\])(?!\]) # Close bracket
|
||||
| # Else if opening wrapper is a parenthesis
|
||||
(?<!\()(?P<open>\()(?!\() # Open parens
|
||||
(?P<key>[0-9\p{Letter}\w\s_/-;\*\~`]+?) # Find key
|
||||
(?<!:)::(?!:) # Separator
|
||||
(?P<value>.*?) # Value
|
||||
(?<!\))(?P<close>\))(?!\)) # Close parenthesis
|
||||
)
|
||||
| # Else grab entire line
|
||||
(?P<key>[0-9\p{Letter}\w\s_/-;\*\~`]+?) # Find key
|
||||
(?<!:)::(?!:) # Separator
|
||||
(?P<value>.*) # Value
|
||||
)
|
||||
|
||||
""",
|
||||
re.X | re.I,
|
||||
)
|
||||
top_with_header = re.compile(
|
||||
r"""^\s* # Start of note
|
||||
(?P<top> # Capture the top of the note
|
||||
.* # Anything above the first header
|
||||
\#+[ ].*?[\r\n] # Full header, if it exists
|
||||
) # End capture group
|
||||
""",
|
||||
flags=re.DOTALL | re.X,
|
||||
)
|
||||
validate_key_text = re.compile(r"[^-_\w\d\/\*\u263a-\U0001f999]")
|
||||
validate_tag_text = re.compile(r"[ \|,;:\*\(\)\[\]\\\.\n#&]")
|
||||
|
||||
def return_inline_metadata(self, line: str) -> list[tuple[str, str, Wrapping]] | None:
|
||||
"""Return a list of metadata matches for a single line.
|
||||
|
||||
Args:
|
||||
line (str): The text to search.
|
||||
|
||||
Returns:
|
||||
list[tuple[str, str, Wrapping]] | None: A list of tuples containing the key, value, and wrapping type.
|
||||
"""
|
||||
sep = r"(?<!:)::(?!:)"
|
||||
if not re.search(sep, line):
|
||||
return None
|
||||
|
||||
# Replace emoji with text
|
||||
line = emoji.demojize(line, delimiters=(";", ";"))
|
||||
|
||||
matches = []
|
||||
for match in self.inline_metadata.finditer(line):
|
||||
match match.group("open"):
|
||||
case "[":
|
||||
wrapper = Wrapping.BRACKETS
|
||||
case "(":
|
||||
wrapper = Wrapping.PARENS
|
||||
case _:
|
||||
wrapper = Wrapping.NONE
|
||||
|
||||
matches.append(
|
||||
(
|
||||
emoji.emojize(match.group("key"), delimiters=(";", ";")),
|
||||
emoji.emojize(match.group("value"), delimiters=(";", ";")),
|
||||
wrapper,
|
||||
)
|
||||
)
|
||||
|
||||
return matches
|
||||
|
||||
def return_frontmatter(self, text: str, data_only: bool = False) -> str | None:
|
||||
"""Return a list of metadata matches.
|
||||
|
||||
Args:
|
||||
text (str): The text to search.
|
||||
data_only (bool, optional): If True, only return the frontmatter data and strip the "---" lines from the returned string. Defaults to False
|
||||
|
||||
Returns:
|
||||
str | None: The frontmatter block, or None if no frontmatter is found.
|
||||
"""
|
||||
if data_only:
|
||||
result = self.frontmatter_data.search(text)
|
||||
else:
|
||||
result = self.frontmatter_complete.search(text)
|
||||
|
||||
if result:
|
||||
return result.group("frontmatter").strip()
|
||||
return None
|
||||
|
||||
def return_tags(self, text: str) -> list[str]:
|
||||
"""Return a list of tags.
|
||||
|
||||
Args:
|
||||
text (str): The text to search.
|
||||
|
||||
Returns:
|
||||
list[str]: A list of tags.
|
||||
"""
|
||||
return [
|
||||
t.group("tag")
|
||||
for t in self.tag.finditer(text)
|
||||
if not re.match(r"^#[0-9]+$", t.group("tag"))
|
||||
]
|
||||
|
||||
def return_top_with_header(self, text: str) -> str:
|
||||
"""Returns the top content of a string until the end of the first markdown header found.
|
||||
|
||||
Args:
|
||||
text (str): The text to search.
|
||||
|
||||
Returns:
|
||||
str: The top content of the string.
|
||||
"""
|
||||
result = self.top_with_header.search(text)
|
||||
if result:
|
||||
return result.group("top")
|
||||
return None
|
||||
|
||||
def strip_frontmatter(self, text: str, data_only: bool = False) -> str:
|
||||
"""Strip frontmatter from a string.
|
||||
|
||||
Args:
|
||||
text (str): The text to search.
|
||||
data_only (bool, optional): If True, only strip the frontmatter data and leave the '---' lines. Defaults to False
|
||||
"""
|
||||
if data_only:
|
||||
return self.frontmatter_data.sub(r"\g<open>\n\g<close>", text)
|
||||
|
||||
return self.frontmatter_complete.sub("", text)
|
||||
|
||||
def strip_code_blocks(self, text: str) -> str:
|
||||
"""Strip code blocks from a string."""
|
||||
return self.code_block.sub("", text)
|
||||
|
||||
def strip_inline_code(self, text: str) -> str:
|
||||
"""Strip inline code from a string."""
|
||||
return self.inline_code.sub("", text)
|
||||
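# Illustrative sketch (not part of the diff): feeding sample text through the
# parser above. Captured values keep their leading whitespace.
#
#   p = Parser()
#   p.return_inline_metadata("area:: office")
#   # [("area", " office", Wrapping.NONE)]
#   p.return_tags("some text #project/alpha")
#   # ["#project/alpha"]
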
@@ -1,41 +0,0 @@
"""Regexes for parsing frontmatter and note content."""

import re
from dataclasses import dataclass
from typing import Pattern


@dataclass
class Patterns:
    """Regex patterns for parsing frontmatter and note content."""

    find_inline_tags: Pattern[str] = re.compile(
        r"""
        (?:^|[ \|_,;:\*\(\)\[\]\\\.])     # Before tag is start of line or separator
        \#([^ \|,;:\*\(\)\[\]\\\.\n#&]+)  # Match tag until separator or end of line
        """,
        re.MULTILINE | re.X,
    )

    frontmatt_block_with_separators: Pattern[str] = re.compile(
        r"^\s*(?P<frontmatter>---.*?---)", flags=re.DOTALL
    )
    frontmatt_block_no_separators: Pattern[str] = re.compile(
        r"^\s*---(?P<frontmatter>.*?)---", flags=re.DOTALL
    )
    # This pattern will return a tuple of 4 values; two will be empty and will need to be stripped before processing further
    find_inline_metadata: Pattern[str] = re.compile(
        r"""                                                     # First look for in-text key values
        (?:^\[| \[)                                              # Find key with starting bracket
        ([-_\w\d\/\*\u263a-\U0001f645]+?)::[ ]?                  # Find key
        (.*?)\]                                                  # Find value until closing bracket
        |                                                        # Else look for key values at start of line
        (?:^|[^ \w\d]+| \[)                                      # Any non-word or non-digit character
        ([-_\w\d\/\*\u263a-\U0001f645]+?)::(?!\n)(?:[ ](?!\n))?  # Capture the key if not a new line
        (.*?)$                                                   # Capture the value
        """,
        re.X | re.MULTILINE,
    )

    validate_tag_text: Pattern[str] = re.compile(r"[ \|,;:\*\(\)\[\]\\\.\n#&]")
    validate_key_text: Pattern[str] = re.compile(r"[^-_\w\d\/\*\u263a-\U0001f645]")
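
# A quick illustration of the comment above: `find_inline_metadata` is an
# alternation of two (key, value) branches, so `findall()` yields four groups per
# match and two of them are always empty strings. (Sample text only.)
found = Patterns().find_inline_metadata.findall("key:: value")
# found == [("", "", "key", "value")] -- the bracketed branch's groups are empty
pairs = [(k1 or k2, v1 or v2) for k1, v1, k2, v2 in found]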
@@ -12,10 +12,20 @@ from typing import Any
import questionary
import typer

from obsidian_metadata.models.patterns import Patterns
from obsidian_metadata.models.enums import InsertLocation, MetadataType
from obsidian_metadata.models.parsers import Parser
from obsidian_metadata.models.vault import Vault

PATTERNS = Patterns()
P = Parser()

# Reset the default style of the questionary prompts qmark
questionary.prompts.checkbox.DEFAULT_STYLE = questionary.Style([("qmark", "")])
questionary.prompts.common.DEFAULT_STYLE = questionary.Style([("qmark", "")])
questionary.prompts.confirm.DEFAULT_STYLE = questionary.Style([("qmark", "")])
questionary.prompts.path.DEFAULT_STYLE = questionary.Style([("qmark", "")])
questionary.prompts.select.DEFAULT_STYLE = questionary.Style([("qmark", "")])
questionary.prompts.text.DEFAULT_STYLE = questionary.Style([("qmark", "")])


class Questions:
@@ -40,7 +50,7 @@ class Questions:

    @staticmethod
    def _validate_valid_dir(path: str) -> bool | str:
        """Validates a valid directory.
        """Validate a valid directory.

        Returns:
            bool | str: True if the path is valid, otherwise a string with the error message.
@@ -53,7 +63,7 @@ class Questions:

        return True

    def __init__(self, vault: Vault = None, key: str = None) -> None:
    def __init__(self, vault: Vault = None, key: str | None = None) -> None:
        """Initialize the class.

        Args:
@@ -63,243 +73,21 @@ class Questions:
        """
        self.style = questionary.Style(
            [
                ("separator", "bold fg:#6C6C6C"),
                ("instruction", "fg:#6C6C6C"),
                ("highlighted", "bold reverse"),
                ("qmark", "bold"),
                ("question", "bold"),
                ("separator", "fg:#808080"),
                ("answer", "fg:#FF9D00 bold"),
                ("instruction", "fg:#808080"),
                ("highlighted", "bold underline"),
                ("text", ""),
                ("pointer", "bold"),
            ]
        )
        self.vault = vault
        self.key = key

    def ask_confirm(self, question: str, default: bool = True) -> bool:  # pragma: no cover
        """Ask the user to confirm an action.

        Args:
            question (str): The question to ask.
            default (bool, optional): The default value. Defaults to True.

        Returns:
            bool: True if the user confirms, otherwise False.
        """
        return questionary.confirm(question, default=default, style=self.style).ask()

    def ask_main_application(self) -> str:  # pragma: no cover
        """Selectable list for the main application interface.

        Args:
            style (questionary.Style): The style to use for the question.

        Returns:
            str: The selected application.
        """
        return questionary.select(
            "What do you want to do?",
            choices=[
                questionary.Separator("\n-- VAULT ACTIONS -----------------"),
                {"name": "Backup vault", "value": "backup_vault"},
                {"name": "Delete vault backup", "value": "delete_backup"},
                {"name": "View all metadata", "value": "all_metadata"},
                {"name": "List notes in scope", "value": "list_notes"},
                {
                    "name": "Filter the notes being processed by their path",
                    "value": "filter_notes",
                },
                questionary.Separator("\n-- INLINE TAG ACTIONS ---------"),
                questionary.Separator("Tags in the note body"),
                {
                    "name": "Rename an inline tag",
                    "value": "rename_inline_tag",
                },
                {
                    "name": "Delete an inline tag",
                    "value": "delete_inline_tag",
                },
                questionary.Separator("\n-- METADATA ACTIONS -----------"),
                questionary.Separator("Frontmatter or inline metadata"),
                {"name": "Rename Key", "value": "rename_key"},
                {"name": "Delete Key", "value": "delete_key"},
                {"name": "Rename Value", "value": "rename_value"},
                {"name": "Delete Value", "value": "delete_value"},
                questionary.Separator("\n-- REVIEW/COMMIT CHANGES ------"),
                {"name": "Review changes", "value": "review_changes"},
                {"name": "Commit changes", "value": "commit_changes"},
                questionary.Separator("-------------------------------"),
                {"name": "Quit", "value": "abort"},
            ],
            use_shortcuts=False,
            style=self.style,
        ).ask()

    def ask_for_filter_path(self) -> str:  # pragma: no cover
        """Ask the user for the path to the filter file.

        Returns:
            str: The regex to use for filtering.
        """
        filter_path_regex = questionary.path(
            "Regex to filter the notes being processed by their path:",
            only_directories=False,
            validate=self._validate_valid_vault_regex,
        ).ask()
        if filter_path_regex is None:
            raise typer.Exit(code=1)

        return filter_path_regex

    def ask_for_selection(
        self, choices: list[Any], question: str = "Select an option"
    ) -> Any:  # pragma: no cover
        """Ask the user to select an item from a list.

        Args:
            question (str, optional): The question to ask. Defaults to "Select an option".
            choices (list[Any]): The list of choices.

        Returns:
            any: The selected item value.
        """
        return questionary.select(
            "Select an item:",
            choices=choices,
            use_shortcuts=False,
            style=self.style,
        ).ask()

    def ask_for_existing_inline_tag(self, question: str = "Enter a tag") -> str:  # pragma: no cover
        """Ask the user for an existing inline tag."""
        return questionary.text(
            question,
            validate=self._validate_existing_inline_tag,
        ).ask()

    def ask_for_new_tag(self, question: str = "New tag name") -> str:  # pragma: no cover
        """Ask the user for a new inline tag."""
        return questionary.text(
            question,
            validate=self._validate_new_tag,
        ).ask()

    def ask_for_existing_key(self, question: str = "Enter a key") -> str:  # pragma: no cover
        """Ask the user for a metadata key.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a key".

        Returns:
            str: A metadata key that exists in the vault.
        """
        return questionary.text(
            question,
            validate=self._validate_key_exists,
        ).ask()

    def ask_for_existing_keys_regex(
        self, question: str = "Regex for keys"
    ) -> str:  # pragma: no cover
        """Ask the user for a regex for metadata keys.

        Args:
            question (str, optional): The question to ask. Defaults to "Regex for keys".

        Returns:
            str: A regex for metadata keys that exist in the vault.
        """
        return questionary.text(
            question,
            validate=self._validate_key_exists_regex,
        ).ask()

    def ask_for_existing_value_regex(
        self, question: str = "Regex for values"
    ) -> str:  # pragma: no cover
        """Ask the user for a regex for metadata values.

        Args:
            question (str, optional): The question to ask. Defaults to "Regex for values".

        Returns:
            str: A regex for metadata values that exist in the vault.
        """
        return questionary.text(
            question,
            validate=self._validate_value_exists_regex,
        ).ask()

    def ask_for_existing_value(self, question: str = "Enter a value") -> str:  # pragma: no cover
        """Ask the user for a metadata value.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a value".

        Returns:
            str: A metadata value.
        """
        return questionary.text(question, validate=self._validate_value).ask()

    def ask_for_new_key(self, question: str = "New key name") -> str:  # pragma: no cover
        """Ask the user for a new metadata key.

        Args:
            question (str, optional): The question to ask. Defaults to "New key name".

        Returns:
            str: A new metadata key.
        """
        return questionary.text(
            question,
            validate=self._validate_new_key,
        ).ask()

    def ask_for_new_value(self, question: str = "New value") -> str:  # pragma: no cover
        """Ask the user for a new metadata value.

        Args:
            question (str, optional): The question to ask. Defaults to "New value".

        Returns:
            str: A new metadata value.
        """
        return questionary.text(
            question,
            validate=self._validate_new_value,
        ).ask()

    def _validate_key_exists(self, text: str) -> bool | str:
        """Validates a valid key.

        Returns:
            bool | str: True if the key is valid, otherwise a string with the error message.
        """
        if len(text) < 1:
            return "Key cannot be empty"

        if not self.vault.metadata.contains(text):
            return f"'{text}' does not exist as a key in the vault"

        return True

    def _validate_key_exists_regex(self, text: str) -> bool | str:
        """Validates a valid key.

        Returns:
            bool | str: True if the key is valid, otherwise a string with the error message.
        """
        if len(text) < 1:
            return "Key cannot be empty"

        try:
            re.compile(text)
        except re.error as error:
            return f"Invalid regex: {error}"

        if not self.vault.metadata.contains(text, is_regex=True):
            return f"'{text}' does not exist as a key in the vault"

        return True

    def _validate_existing_inline_tag(self, text: str) -> bool | str:
        """Validates an existing inline tag.
    def _validate_existing_tag(self, text: str) -> bool | str:
        """Validate an existing inline tag.

        Returns:
            bool | str: True if the tag is valid, otherwise a string with the error message.
@@ -307,30 +95,41 @@ class Questions:
        if len(text) < 1:
            return "Tag cannot be empty"

        if not self.vault.contains_inline_tag(text):
        if not self.vault.contains_metadata(meta_type=MetadataType.TAGS, key=None, value=text):
            return f"'{text}' does not exist as a tag in the vault"

        return True

    def _validate_valid_vault_regex(self, text: str) -> bool | str:
        """Validates a valid regex.
    def _validate_key_exists(self, text: str) -> bool | str:
        """Validate a valid key.

        Returns:
            bool | str: True if the regex is valid, otherwise a string with the error message.
            bool | str: True if the key is valid, otherwise a string with the error message.
        """
        if len(text) < 1:
            return "Regex cannot be empty"
            return "Key cannot be empty"

        if not self.vault.contains_metadata(meta_type=MetadataType.META, key=text):
            return f"'{text}' does not exist as a key in the vault"

        return True

    def _validate_key_exists_regex(self, text: str) -> bool | str:
        """Validate a valid key.

        Returns:
            bool | str: True if the key is valid, otherwise a string with the error message.
        """
        if len(text) < 1:
            return "Key cannot be empty"

        try:
            re.compile(text)
        except re.error as error:
            return f"Invalid regex: {error}"

        if self.vault is not None:
            for subdir in list(self.vault.vault_path.glob("**/*")):
                if re.search(text, str(subdir)):
                    return True
            return "Regex does not match paths in the vault"
        if not self.vault.contains_metadata(meta_type=MetadataType.META, key=text, is_regex=True):
            return f"'{text}' does not exist as a key in the vault"

        return True

@@ -343,7 +142,7 @@ class Questions:
        Returns:
            bool | str: True if the key is valid, otherwise a string with the error message.
        """
        if PATTERNS.validate_key_text.search(text) is not None:
        if P.validate_key_text.search(text) is not None:
            return "Key cannot contain spaces or special characters"

        if len(text) == 0:
@@ -360,7 +159,7 @@ class Questions:
        Returns:
            bool | str: True if the tag is valid, otherwise a string with the error message.
        """
        if PATTERNS.validate_tag_text.search(text) is not None:
        if P.validate_tag_text.search(text) is not None:
            return "Tag cannot contain spaces or special characters"

        if len(text) == 0:
@@ -368,6 +167,75 @@ class Questions:

        return True

    def _validate_new_value(self, text: str) -> bool | str:
        """Validate a new value.

        Args:
            text (str): The value to validate.

        Returns:
            bool | str: True if the value is valid, otherwise a string with the error message.
        """
        if len(text) < 1:
            return "Value cannot be empty"

        if self.key is not None and self.vault.contains_metadata(
            meta_type=MetadataType.ALL, key=self.key, value=text
        ):
            return f"{self.key}:{text} already exists"

        return True

    def _validate_number(self, text: str) -> bool | str:
        """Validate a number.

        Args:
            text (str): The number to validate.

        Returns:
            bool | str: True if the number is valid, otherwise a string with the error message.
        """
        if not text.isdigit():
            return "Must be an integer"

        return True

    def _validate_path_is_file(self, text: str) -> bool | str:
        """Validate a path is a file.

        Args:
            text (str): The path to validate.

        Returns:
            bool | str: True if the path is valid, otherwise a string with the error message.
        """
        path_to_validate: Path = Path(text).expanduser().resolve()
        if not path_to_validate.exists():
            return f"Path does not exist: {path_to_validate}"
        if not path_to_validate.is_file():
            return f"Path is not a file: {path_to_validate}"

        return True

    def _validate_valid_vault_regex(self, text: str) -> bool | str:
        """Validate a valid regex.

        Returns:
            bool | str: True if the regex is valid, otherwise a string with the error message.
        """
        try:
            re.compile(text)
        except re.error as error:
            return f"Invalid regex: {error}"

        if self.vault is not None:
            for subdir in list(self.vault.vault_path.glob("**/*")):
                if re.search(text, str(subdir)):
                    return True
            return "Regex does not match paths in the vault"

        return True

    def _validate_value(self, text: str) -> bool | str:
        """Validate the value.

@@ -377,10 +245,12 @@ class Questions:
        Returns:
            bool | str: True if the value is valid, otherwise a string with the error message.
        """
        if len(text) < 1:
            return "Value cannot be empty"
        if len(text) == 0:
            return True

        if self.key is not None and not self.vault.metadata.contains(self.key, text):
        if self.key is not None and not self.vault.contains_metadata(
            meta_type=MetadataType.ALL, key=self.key, value=text
        ):
            return f"{self.key}:{text} does not exist"

        return True
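
# A minimal sketch of how these `bool | str` validators plug into a questionary
# prompt: returning True accepts the input, while returning a string rejects it
# and is displayed as the error message. (Standalone example; the prompt text
# and validator name are illustrative.)
import questionary

def validate_number(text: str) -> bool | str:
    return True if text.isdigit() else "Must be an integer"

answer = questionary.text("Enter a number", validate=validate_number).ask()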
@@ -402,24 +272,271 @@ class Questions:
        except re.error as error:
            return f"Invalid regex: {error}"

        if self.key is not None and not self.vault.metadata.contains(self.key, text, is_regex=True):
        if self.key is not None and not self.vault.contains_metadata(
            meta_type=MetadataType.ALL, key=self.key, value=text, is_regex=True
        ):
            return f"No values in {self.key} match regex: {text}"

        return True

    def _validate_new_value(self, text: str) -> bool | str:
        """Validate a new value.
    def ask_application_main(self) -> str:  # pragma: no cover
        """List for the main application interface.

        Args:
            text (str): The value to validate.
            style (questionary.Style): The style to use for the question.

        Returns:
            bool | str: True if the value is valid, otherwise a string with the error message.
            str: The selected application.
        """
        if len(text) < 1:
            return "Value cannot be empty"
        return questionary.select(
            "What do you want to do?",
            choices=[
                questionary.Separator("-------------------------------"),
                {"name": "Vault Actions", "value": "vault_actions"},
                {"name": "Export Metadata", "value": "export_metadata"},
                {"name": "Inspect Metadata", "value": "inspect_metadata"},
                {"name": "Filter Notes in Scope", "value": "filter_notes"},
                questionary.Separator("-------------------------------"),
                {"name": "Import bulk changes from CSV", "value": "import_from_csv"},
                {"name": "Add Metadata", "value": "add_metadata"},
                {"name": "Delete Metadata", "value": "delete_metadata"},
                {"name": "Rename Metadata", "value": "rename_metadata"},
                {"name": "Reorganize Metadata", "value": "reorganize_metadata"},
                questionary.Separator("-------------------------------"),
                {"name": "Review Changes", "value": "review_changes"},
                {"name": "Commit Changes", "value": "commit_changes"},
                questionary.Separator("-------------------------------"),
                {"name": "Quit", "value": "abort"},
            ],
            use_shortcuts=False,
            style=self.style,
            qmark="INPUT |",
        ).ask()

        if self.key is not None and self.vault.metadata.contains(self.key, text):
            return f"{self.key}:{text} already exists"
    def ask_confirm(self, question: str, default: bool = True) -> bool:  # pragma: no cover
        """Ask the user to confirm an action.

        return True
        Args:
            question (str): The question to ask.
            default (bool, optional): The default value. Defaults to True.

        Returns:
            bool: True if the user confirms, otherwise False.
        """
        return questionary.confirm(
            question, default=default, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_existing_tag(self, question: str = "Enter a tag") -> str:  # pragma: no cover
        """Ask the user for an existing inline tag."""
        return questionary.text(
            question,
            validate=self._validate_existing_tag,
            style=self.style,
            qmark="INPUT |",
        ).ask()

    def ask_existing_key(self, question: str = "Enter a key") -> str:  # pragma: no cover
        """Ask the user for a metadata key.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a key".

        Returns:
            str: A metadata key that exists in the vault.
        """
        return questionary.text(
            question, validate=self._validate_key_exists, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_existing_keys_regex(self, question: str = "Regex for keys") -> str:  # pragma: no cover
        """Ask the user for a regex for metadata keys.

        Args:
            question (str, optional): The question to ask. Defaults to "Regex for keys".

        Returns:
            str: A regex for metadata keys that exist in the vault.
        """
        return questionary.text(
            question, validate=self._validate_key_exists_regex, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_existing_value(self, question: str = "Enter a value") -> str:  # pragma: no cover
        """Ask the user for a metadata value.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a value".

        Returns:
            str: A metadata value.
        """
        return questionary.text(
            question, validate=self._validate_value, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_filter_path(self) -> str:  # pragma: no cover
        """Ask the user for the path to the filter file.

        Returns:
            str: The regex to use for filtering.
        """
        filter_path_regex = questionary.path(
            "Regex to filter the notes being processed by their path:",
            only_directories=False,
            validate=self._validate_valid_vault_regex,
            style=self.style,
            qmark="INPUT |",
        ).ask()
        if filter_path_regex is None:
            raise typer.Exit(code=1)

        return filter_path_regex

    def ask_existing_value_regex(
        self, question: str = "Regex for values"
    ) -> str:  # pragma: no cover
        """Ask the user for a regex for metadata values.

        Args:
            question (str, optional): The question to ask. Defaults to "Regex for values".

        Returns:
            str: A regex for metadata values that exist in the vault.
        """
        return questionary.text(
            question,
            validate=self._validate_value_exists_regex,
            style=self.style,
            qmark="INPUT |",
        ).ask()

    def ask_metadata_location(
        self, question: str = "Where in a note should we add metadata"
    ) -> InsertLocation:  # pragma: no cover
        """Ask the user for the location within a note to place new metadata.

        Returns:
            InsertLocation: The location within a note to place new metadata.
        """
        choices = []
        for metadata_location in InsertLocation:
            choices.append({"name": metadata_location.value, "value": metadata_location})

        return self.ask_selection(
            choices=choices,
            question=question,
        )

    def ask_meta_type(self) -> MetadataType | str:  # pragma: no cover
        """Ask the user for the type of metadata to work on.

        Returns:
            MetadataType: The metadata type.
        """
        choices = []
        for meta_type in MetadataType:
            match meta_type:
                case MetadataType.ALL | MetadataType.META | MetadataType.KEYS:
                    continue
                case _:
                    choices.append({"name": meta_type.value, "value": meta_type})

        choices.append(questionary.Separator())  # type: ignore [arg-type]
        choices.append({"name": "Cancel", "value": "cancel"})
        return self.ask_selection(
            choices=choices,
            question="Select the type of metadata",
        )

    def ask_new_key(self, question: str = "New key name") -> str:  # pragma: no cover
        """Ask the user for a new metadata key.

        Args:
            question (str, optional): The question to ask. Defaults to "New key name".

        Returns:
            str: A new metadata key.
        """
        return questionary.text(
            question, validate=self._validate_new_key, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_new_tag(self, question: str = "Enter a new tag") -> str:  # pragma: no cover
        """Ask the user for a new tag.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a new tag".
        """
        return questionary.text(
            question, validate=self._validate_new_tag, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_new_value(self, question: str = "New value") -> str:  # pragma: no cover
        """Ask the user for a new metadata value.

        Args:
            question (str, optional): The question to ask. Defaults to "New value".

        Returns:
            str: A new metadata value.
        """
        return questionary.text(
            question, validate=self._validate_new_value, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_number(self, question: str = "Enter a number") -> int:  # pragma: no cover
        """Ask the user for a number.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a number".

        Returns:
            int: A number.
        """
        return questionary.text(
            question, validate=self._validate_number, style=self.style, qmark="INPUT |"
        ).ask()

    def ask_path(
        self, question: str = "Enter a path", valid_file: bool = False
    ) -> str:  # pragma: no cover
        """Ask the user for a path.

        Args:
            question (str, optional): The question to ask. Defaults to "Enter a path".
            valid_file (bool, optional): Whether the path should be a valid file. Defaults to False.

        Returns:
            str: A path.
        """
        if valid_file:
            return questionary.path(
                question,
                only_directories=False,
                style=self.style,
                validate=self._validate_path_is_file,
                qmark="INPUT |",
            ).ask()

        return questionary.path(question, style=self.style, qmark="INPUT |").ask()

    def ask_selection(
        self, choices: list[Any], question: str = "Select an option"
    ) -> Any:  # pragma: no cover
        """Ask the user to select an item from a list.

        Args:
            question (str, optional): The question to ask. Defaults to "Select an option".
            choices (list[Any]): The list of choices.

        Returns:
            any: The selected item value.
        """
        return questionary.select(
            question,
            choices=choices,
            use_shortcuts=False,
            style=self.style,
            qmark="INPUT |",
        ).ask()
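
# Usage sketch for `ask_selection` above: questionary choices may mix plain
# dicts of {"name": ..., "value": ...} with Separator objects, and the returned
# value is the selected dict's "value". (`my_vault` is an illustrative Vault
# instance, not part of this diff.)
choices = [
    {"name": "Rename Key", "value": "rename_key"},
    questionary.Separator(),
    {"name": "Quit", "value": "abort"},
]
action = Questions(vault=my_vault).ask_selection(choices=choices, question="What do you want to do?")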
@@ -1,19 +1,35 @@
"""Obsidian vault representation."""

import csv
import json
import re
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Any

import rich.repr
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn
import typer
from rich import box
from rich.columns import Columns
from rich.prompt import Confirm
from rich.table import Table

from obsidian_metadata._config import VaultConfig
from obsidian_metadata._utils import alerts
from obsidian_metadata._config.config import VaultConfig
from obsidian_metadata._utils import alerts, dict_contains, merge_dictionaries
from obsidian_metadata._utils.alerts import logger as log
from obsidian_metadata.models import Note, VaultMetadata
from obsidian_metadata._utils.console import console, console_no_markup
from obsidian_metadata.models import InsertLocation, MetadataType, Note


@dataclass
class VaultFilter:
    """Vault filters."""

    path_filter: str = None
    key_filter: str = None
    value_filter: str = None
    tag_filter: str = None
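
# A small usage sketch: each VaultFilter field narrows the notes in scope, and
# the filters are applied in order by Vault._filter_notes() below. (`my_config`
# is an illustrative VaultConfig instance, not part of this diff.)
filters = [
    VaultFilter(path_filter=r"^projects/"),
    VaultFilter(tag_filter="inbox"),
]
vault = Vault(config=my_config, dry_run=True, filters=filters)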
@rich.repr.auto
@@ -27,69 +43,232 @@ class Vault:
        notes (list[Note]): List of all notes in the vault.
    """

    def __init__(self, config: VaultConfig, dry_run: bool = False, path_filter: str = None):
    def __init__(
        self,
        config: VaultConfig,
        dry_run: bool = False,
        filters: list[VaultFilter] = [],
    ) -> None:
        self.config = config.config
        self.vault_path: Path = config.path
        self.name = self.vault_path.name
        self.insert_location: InsertLocation = self._find_insert_location()
        self.dry_run: bool = dry_run
        self.backup_path: Path = self.vault_path.parent / f"{self.vault_path.name}.bak"
        self.frontmatter: dict[str, list[str]] = {}
        self.inline_meta: dict[str, list[str]] = {}
        self.tags: list[str] = []
        self.exclude_paths: list[Path] = []
        self.metadata = VaultMetadata()

        for p in config.exclude_paths:
            self.exclude_paths.append(Path(self.vault_path / p))

        self.path_filter = path_filter
        self.note_paths = self._find_markdown_notes(path_filter)
        self.filters = filters
        self.all_note_paths = self._find_markdown_notes()

        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            transient=True,
        ) as progress:
            progress.add_task(description="Processing notes...", total=None)
            self.notes: list[Note] = [
                Note(note_path=p, dry_run=self.dry_run) for p in self.note_paths
        with console.status(
            "Processing notes... [dim](Can take a while for a large vault)[/]",
            spinner="bouncingBall",
        ):
            self.all_notes: list[Note] = [
                Note(note_path=p, dry_run=self.dry_run) for p in self.all_note_paths
            ]
        for _note in self.notes:
            self.metadata.add_metadata(_note.frontmatter.dict)
            self.metadata.add_metadata(_note.inline_metadata.dict)
            self.metadata.add_metadata({_note.inline_tags.metadata_key: _note.inline_tags.list})
        self.notes_in_scope = self._filter_notes()

        self._rebuild_vault_metadata()

    def __rich_repr__(self) -> rich.repr.Result:  # pragma: no cover
        """Define rich representation of Vault."""
        yield "vault_path", self.vault_path
        yield "dry_run", self.dry_run
        yield "backup_path", self.backup_path
        yield "num_notes", self.num_notes()
        yield "config", self.config
        yield "dry_run", self.dry_run
        yield "exclude_paths", self.exclude_paths
        yield "filters", self.filters
        yield "insert_location", self.insert_location
        yield "name", self.name
        yield "num_notes_in_scope", len(self.notes_in_scope)
        yield "num_notes", len(self.all_notes)
        yield "vault_path", self.vault_path

    def _find_markdown_notes(self, path_filter: str = None) -> list[Path]:
        """Build list of all markdown files in the vault.
    def _filter_notes(self) -> list[Note]:
        """Filter notes by path and metadata using the filters defined in self.filters.

        Returns:
            list[Note]: List of notes matching the filters.
        """
        notes_list = self.all_notes.copy()

        for _filter in self.filters:
            if _filter.path_filter is not None:
                notes_list = [
                    n
                    for n in notes_list
                    if re.search(_filter.path_filter, str(n.note_path.relative_to(self.vault_path)))
                ]

            if _filter.tag_filter is not None:
                notes_list = [
                    n
                    for n in notes_list
                    if n.contains_metadata(
                        MetadataType.TAGS, search_key="", search_value=_filter.tag_filter
                    )
                ]

            if _filter.key_filter is not None and _filter.value_filter is not None:
                notes_list = [
                    n
                    for n in notes_list
                    if n.contains_metadata(
                        meta_type=MetadataType.META,
                        search_key=_filter.key_filter,
                        search_value=_filter.value_filter,
                    )
                ]

            if _filter.key_filter is not None and _filter.value_filter is None:
                notes_list = [
                    n
                    for n in notes_list
                    if n.contains_metadata(
                        MetadataType.META, search_key=_filter.key_filter, search_value=None
                    )
                ]

        return notes_list

    def _find_insert_location(self) -> InsertLocation:
        """Find the insert location for a note from the configuration file.

        Returns:
            InsertLocation: Insert location for the note.
        """
        if self.config["insert_location"].upper() == "TOP":
            return InsertLocation.TOP

        if self.config["insert_location"].upper() == "AFTER_TITLE":
            return InsertLocation.AFTER_TITLE

        if self.config["insert_location"].upper() == "BOTTOM":
            return InsertLocation.BOTTOM

        return InsertLocation.BOTTOM

    @property
    def insert_location(self) -> InsertLocation:
        """Location to insert new or reorganized metadata.

        Returns:
            InsertLocation: The insert location.
        """
        return self._insert_location

    @insert_location.setter
    def insert_location(self, value: InsertLocation) -> None:
        """Set the insert location for the vault.

        Args:
            path_filter (str, optional): Regex to filter notes by path.
            value (InsertLocation): The insert location to set.
        """
        self._insert_location = value

    def _find_markdown_notes(self) -> list[Path]:
        """Build list of all markdown files in the vault.

        Returns:
            list[Path]: List of paths to all matching files in the vault.

        """
        notes_list = [
        return [
            p.resolve()
            for p in self.vault_path.glob("**/*")
            if p.suffix in [".md", ".MD", ".markdown", ".MARKDOWN"]
            and not any(item in p.parents for item in self.exclude_paths)
        ]

        if path_filter is not None:
            notes_list = [
                p for p in notes_list if re.search(path_filter, str(p.relative_to(self.vault_path)))
            ]
    def _rebuild_vault_metadata(self) -> None:
        """Rebuild vault metadata. Indexes all frontmatter, inline metadata, and tags and adds them to dictionary objects."""
        with console.status(
            "Processing notes... [dim](Can take a while for a large vault)[/]",
            spinner="bouncingBall",
        ):
            vault_frontmatter = {}
            vault_inline_meta = {}
            vault_tags = []
            for _note in self.notes_in_scope:
                for field in _note.metadata:
                    match field.meta_type:
                        case MetadataType.FRONTMATTER:
                            if field.clean_key not in vault_frontmatter:
                                vault_frontmatter[field.clean_key] = (
                                    [field.normalized_value]
                                    if field.normalized_value != "-"
                                    else []
                                )
                            elif field.normalized_value != "-":
                                vault_frontmatter[field.clean_key].append(field.normalized_value)
                        case MetadataType.INLINE:
                            if field.clean_key not in vault_inline_meta:
                                vault_inline_meta[field.clean_key] = (
                                    [field.normalized_value]
                                    if field.normalized_value != "-"
                                    else []
                                )
                            elif field.normalized_value != "-":
                                vault_inline_meta[field.clean_key].append(field.normalized_value)
                        case MetadataType.TAGS:
                            if field.normalized_value not in vault_tags:
                                vault_tags.append(field.normalized_value)

        return notes_list
            self.frontmatter = {
                k: sorted(list(set(v))) for k, v in sorted(vault_frontmatter.items())
            }
            self.inline_meta = {
                k: sorted(list(set(v))) for k, v in sorted(vault_inline_meta.items())
            }
            self.tags = sorted(list(set(vault_tags)))
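
# A condensed sketch of the dedupe-and-sort step that closes the rebuild above:
# accumulated per-key values become sorted, unique lists. (Sample data only.)
vault_frontmatter = {"status": ["draft", "active", "draft"], "area": ["work"]}
frontmatter = {k: sorted(set(v)) for k, v in sorted(vault_frontmatter.items())}
assert frontmatter == {"area": ["work"], "status": ["active", "draft"]}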
    def add_metadata(
        self,
        meta_type: MetadataType,
        key: str | None = None,
        value: str | None = None,
        location: InsertLocation = None,
    ) -> int:
        """Add metadata to all notes in the vault which do not already contain it.

        Args:
            meta_type (MetadataType): Area of metadata to add to.
            key (str): Key to add.
            value (str, optional): Value to add.
            location (InsertLocation, optional): Location to insert metadata. (Defaults to `vault.config.insert_location`)

        Returns:
            int: Number of notes updated.
        """
        if location is None:
            location = self.insert_location

        num_changed = 0

        for _note in self.notes_in_scope:
            if _note.add_metadata(
                meta_type=meta_type, added_key=key, added_value=value, location=location
            ):
                log.trace(f"Added metadata to {_note.note_path}")
                num_changed += 1

        if num_changed > 0:
            self._rebuild_vault_metadata()

        return num_changed

    def backup(self) -> None:
        """Backup the vault."""
        log.debug("Backing up vault")
        if self.dry_run:
            alerts.dryrun(f"Backing up vault to: {self.backup_path}")
            console.print("\n")
            return

        try:
@@ -107,33 +286,58 @@ class Vault:

        alerts.success(f"Vault backed up to: {self.backup_path}")

    def contains_inline_tag(self, tag: str, is_regex: bool = False) -> bool:
        """Check if vault contains the given inline tag.
    def commit_changes(self) -> None:
        """Commit changes by writing to disk."""
        log.debug("Writing changes to vault...")
        if self.dry_run:
            for _note in self.notes_in_scope:
                if _note.has_changes():
                    alerts.dryrun(
                        f"writing changes to {_note.note_path.relative_to(self.vault_path)}"
                    )
            return

        for _note in self.notes_in_scope:
            if _note.has_changes():
                log.trace(f"writing to {_note.note_path}")
                _note.commit()

    def contains_metadata(
        self, meta_type: MetadataType, key: str, value: str | None = None, is_regex: bool = False
    ) -> bool:
        """Check if the vault contains metadata.

        Args:
            tag (str): Tag to check for.
            is_regex (bool, optional): Whether to use regex to match tag.
            meta_type (MetadataType): Area of metadata to check.
            key (str): Key to check.
            value (str, optional): Value to check. Defaults to None.
            is_regex (bool, optional): Whether the value is a regex. Defaults to False.

        Returns:
            bool: True if tag is found in vault.
            bool: Whether the vault contains the metadata.
        """
        return any(_note.contains_inline_tag(tag) for _note in self.notes)
        if meta_type == MetadataType.FRONTMATTER and key is not None:
            return dict_contains(self.frontmatter, key, value, is_regex)

    def contains_metadata(self, key: str, value: str = None, is_regex: bool = False) -> bool:
        """Check if vault contains the given metadata.
        if meta_type == MetadataType.INLINE and key is not None:
            return dict_contains(self.inline_meta, key, value, is_regex)

        Args:
            key (str): Key to check for. If value is None, will check vault for key.
            value (str, optional): Value to check for.
            is_regex (bool, optional): Whether to use regex to match key/value.
        if meta_type == MetadataType.TAGS and value is not None:
            if not is_regex:
                value = f"^{re.escape(value)}$"
            return any(re.search(value, item) for item in self.tags)

        Returns:
            bool: True if tag is found in vault.
        """
        if value is None:
            return self.metadata.contains(key, is_regex=is_regex)
        if meta_type == MetadataType.META:
            return self.contains_metadata(
                MetadataType.FRONTMATTER, key, value, is_regex
            ) or self.contains_metadata(MetadataType.INLINE, key, value, is_regex)

        return self.metadata.contains(key, value, is_regex=is_regex)
        if meta_type == MetadataType.ALL:
            return self.contains_metadata(
                MetadataType.TAGS, key, value, is_regex
            ) or self.contains_metadata(MetadataType.META, key, value, is_regex)

        return False

    def delete_backup(self) -> None:
        """Delete the vault backup."""
@@ -146,7 +350,7 @@ class Vault:
        else:
            alerts.info("No backup found")

    def delete_inline_tag(self, tag: str) -> int:
    def delete_tag(self, tag: str) -> int:
        """Delete an inline tag in the vault.

        Args:
@@ -157,19 +361,28 @@ class Vault:
        """
        num_changed = 0

        for _note in self.notes:
            if _note.delete_inline_tag(tag):
        for _note in self.notes_in_scope:
            if _note.delete_metadata(MetadataType.TAGS, value=tag):
                log.trace(f"Deleted tag from {_note.note_path}")
                num_changed += 1

        if num_changed > 0:
            self.metadata.delete(self.notes[0].inline_tags.metadata_key, tag)
            self._rebuild_vault_metadata()

        return num_changed

    def delete_metadata(self, key: str, value: str = None) -> int:
    def delete_metadata(
        self,
        key: str,
        value: str | None = None,
        meta_type: MetadataType = MetadataType.ALL,
        is_regex: bool = False,
    ) -> int:
        """Delete metadata in the vault.

        Args:
            meta_type (MetadataType): Area of metadata to delete from.
            is_regex (bool): Whether to use regex for key and value. Defaults to False.
            key (str): Key to delete. Regex is supported.
            value (str, optional): Value to delete. Regex is supported.

@@ -178,69 +391,229 @@ class Vault:
        """
        num_changed = 0

        for _note in self.notes:
            if _note.delete_metadata(key, value):
        for _note in self.notes_in_scope:
            if _note.delete_metadata(meta_type=meta_type, key=key, value=value, is_regex=is_regex):
                log.trace(f"Deleted metadata from {_note.note_path}")
                num_changed += 1

        if num_changed > 0:
            self.metadata.delete(key, value)
            self._rebuild_vault_metadata()

        return num_changed

    def export_metadata(self, path: str, export_format: str = "csv") -> None:
        """Write metadata to a csv or json file.

        Args:
            path (str): Path to write the export file to.
            export_format (str, optional): Export as 'csv' or 'json'. Defaults to "csv".
        """
        export_file = Path(path).expanduser().resolve()
        if not export_file.parent.exists():
            alerts.error(f"Path does not exist: {export_file.parent}")
            raise typer.Exit(code=1)

        match export_format:
            case "csv":
                with export_file.open(mode="w", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(["Metadata Type", "Key", "Value"])

                    for key, value in self.frontmatter.items():
                        if len(value) > 0:
                            for v in value:
                                writer.writerow(["frontmatter", key, v])
                        else:
                            writer.writerow(["frontmatter", key, ""])

                    for key, value in self.inline_meta.items():
                        if len(value) > 0:
                            for v in value:
                                writer.writerow(["inline_metadata", key, v])
                        else:
                            writer.writerow(["inline_metadata", key, ""])

                    for tag in self.tags:
                        writer.writerow(["tags", "", f"{tag}"])

            case "json":
                dict_to_dump = {
                    "frontmatter": self.frontmatter,
                    "inline_metadata": self.inline_meta,
                    "tags": self.tags,
                }

                with export_file.open(mode="w", encoding="utf-8") as f:
                    json.dump(dict_to_dump, f, indent=4, ensure_ascii=False, sort_keys=True)
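
# A sketch of what the "json" branch above writes, using sample vault data:
import json

dict_to_dump = {
    "frontmatter": {"status": ["active", "draft"]},
    "inline_metadata": {"rating": ["5"]},
    "tags": ["inbox", "project/alpha"],
}
print(json.dumps(dict_to_dump, indent=4, ensure_ascii=False, sort_keys=True))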
    def export_notes_to_csv(self, path: str) -> None:
        """Export notes and their associated metadata to a csv file. This is useful as a template for importing metadata changes to a vault.

        Args:
            path (str): Path to write csv file to.
        """
        export_file = Path(path).expanduser().resolve()
        if not export_file.parent.exists():
            alerts.error(f"Path does not exist: {export_file.parent}")
            raise typer.Exit(code=1)

        with export_file.open(mode="w", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["path", "type", "key", "value"])

            for _note in self.all_notes:
                for field in sorted(
                    _note.metadata,
                    key=lambda x: (
                        x.meta_type.name,
                        x.clean_key,
                        x.normalized_value,
                    ),
                ):
                    writer.writerow(
                        [
                            _note.note_path.relative_to(self.vault_path),
                            field.meta_type.name,
                            field.clean_key if field.clean_key is not None else "",
                            field.normalized_value if field.normalized_value != "-" else "",
                        ]
                    )

    def get_changed_notes(self) -> list[Note]:
        """Returns a list of notes that have changes.
        """Return a list of notes that have changes.

        Returns:
            list[Note]: List of notes that have changes.
        """
        changed_notes = []
        for _note in self.notes:
        for _note in self.notes_in_scope:
            if _note.has_changes():
                changed_notes.append(_note)

        changed_notes = sorted(changed_notes, key=lambda x: x.note_path)
        return changed_notes
        return sorted(changed_notes, key=lambda x: x.note_path)

    def info(self) -> None:
        """Print information about the vault."""
        log.debug("Printing vault info")
        table = Table(title="Vault Info", show_header=False)
        table = Table(show_header=False)
        table.add_row("Vault", str(self.vault_path))
        table.add_row("Notes being edited", str(self.num_notes()))
        table.add_row("Notes excluded from editing", str(self.num_excluded_notes()))
        if self.backup_path.exists():
            table.add_row("Backup path", str(self.backup_path))
        else:
            table.add_row("Backup", "None")
        table.add_row("Active path filter", str(self.path_filter))
        table.add_row("Notes with updates", str(len(self.get_changed_notes())))
        table.add_row("Notes in scope", str(len(self.notes_in_scope)))
        table.add_row("Notes excluded from scope", str(self.num_excluded_notes()))
        table.add_row("Active filters", str(len(self.filters)))
        table.add_row("Notes with changes", str(len(self.get_changed_notes())))
        table.add_row("Insert Location", str(self.insert_location.value))

        Console().print(table)
        console_no_markup.print(table)

    def list_editable_notes(self) -> None:
        """Print a list of notes within the scope that are being edited."""
        for _note in self.notes:
            print(_note.note_path.relative_to(self.vault_path))
        table = Table(title="Notes in current scope", show_header=False, box=box.HORIZONTALS)
        for _n, _note in enumerate(self.notes_in_scope, start=1):
            table.add_row(str(_n), str(_note.note_path.relative_to(self.vault_path)))
        console_no_markup.print(table)

    def move_inline_metadata(self, location: InsertLocation) -> int:
        """Move all inline metadata to the selected location.

        Args:
            location (InsertLocation): Location to move inline metadata to.

        Returns:
            int: Number of notes that had inline metadata moved.
        """
        num_changed = 0

        for _note in self.notes_in_scope:
            if _note.transpose_metadata(
                begin=MetadataType.INLINE,
                end=MetadataType.INLINE,
                key=None,
                value=None,
                location=location,
            ):
                log.trace(f"Moved inline metadata in {_note.note_path}")
                num_changed += 1

        if num_changed > 0:
            self._rebuild_vault_metadata()

        return num_changed

    def num_excluded_notes(self) -> int:
        """Count number of excluded notes."""
        excluded_notes = [
            p.resolve()
            for p in self.vault_path.glob("**/*")
            if p.suffix in [".md", ".MD", ".markdown", ".MARKDOWN"] and p not in self.note_paths
        ]
        return len(excluded_notes)
        return len(self.all_notes) - len(self.notes_in_scope)

    def num_notes(self) -> int:
        """Number of notes in the vault.
    def print_metadata(self, meta_type: MetadataType = MetadataType.ALL) -> None:
        """Print metadata for the vault."""
        dict_to_print = None
        list_to_print = None
        match meta_type:
            case MetadataType.INLINE:
                dict_to_print = self.inline_meta
                header = "All inline metadata"
            case MetadataType.FRONTMATTER:
                dict_to_print = self.frontmatter
                header = "All frontmatter"
            case MetadataType.TAGS:
                list_to_print = [f"#{x}" for x in self.tags]
                header = "All inline tags"
            case MetadataType.KEYS:
                list_to_print = sorted(
                    merge_dictionaries(self.frontmatter, self.inline_meta).keys()
                )
                header = "All Keys"
            case MetadataType.ALL:
                dict_to_print = merge_dictionaries(self.frontmatter, self.inline_meta)
                list_to_print = [f"#{x}" for x in self.tags]
                header = "All metadata"

        if dict_to_print is not None:
            table = Table(title=header, show_footer=False, show_lines=True)
            table.add_column("Keys", style="bold")
            table.add_column("Values")
            for key, value in sorted(dict_to_print.items()):
                values: str | dict[str, list[str]] = (
                    "\n".join(sorted(value)) if isinstance(value, list) else value
                )
                table.add_row(f"{key}", str(values))
            console_no_markup.print(table)

        if list_to_print is not None:
            columns = Columns(
                sorted(list_to_print),
                equal=True,
                expand=True,
                title=header if meta_type != MetadataType.ALL else "All inline tags",
            )
            console_no_markup.print(columns)

    def rename_tag(self, old_tag: str, new_tag: str) -> int:
        """Rename an inline tag in the vault.

        Args:
            old_tag (str): Old tag name.
            new_tag (str): New tag name.

        Returns:
            int: Number of notes in the vault.
            int: Number of notes that had inline tags renamed.
        """
        return len(self.notes)
        num_changed = 0

    def rename_metadata(self, key: str, value_1: str, value_2: str = None) -> int:
        """Renames a key or key-value pair in the note's metadata.
        for _note in self.notes_in_scope:
            if _note.rename_tag(old_tag, new_tag):
                log.trace(f"Renamed inline tag in {_note.note_path}")
                num_changed += 1

        if num_changed > 0:
            self._rebuild_vault_metadata()

        return num_changed

    def rename_metadata(self, key: str, value_1: str, value_2: str | None = None) -> int:
        """Rename a key or key-value pair in the note's metadata.

        If no value is provided, will rename an entire key.

@@ -254,40 +627,110 @@ class Vault:
        """
        num_changed = 0

        for _note in self.notes:
        for _note in self.notes_in_scope:
            if _note.rename_metadata(key, value_1, value_2):
                log.trace(f"Renamed metadata in {_note.note_path}")
                num_changed += 1

        if num_changed > 0:
            self.metadata.rename(key, value_1, value_2)
            self._rebuild_vault_metadata()

        return num_changed

    def rename_inline_tag(self, old_tag: str, new_tag: str) -> int:
        """Rename an inline tag in the vault.
    def transpose_metadata(
        self,
        begin: MetadataType,
        end: MetadataType,
        key: str | None = None,
        value: str | None = None,
        location: InsertLocation = None,
    ) -> int:
        """Transpose metadata from one type to another.

        Args:
            old_tag (str): Old tag name.
            new_tag (str): New tag name.
            begin (MetadataType): Metadata type to transpose from.
            end (MetadataType): Metadata type to transpose to.
            key (str, optional): Key to transpose. Defaults to None.
            value (str, optional): Value to transpose. Defaults to None.
            location (InsertLocation, optional): Location to insert metadata. (Defaults to `vault.config.insert_location`)

        Returns:
            int: Number of notes that had inline tags renamed.
            int: Number of notes that had metadata transposed.
        """
        if location is None:
            location = self.insert_location

        num_changed = 0
        for note in self.notes_in_scope:
            if note.transpose_metadata(
                begin=begin,
                end=end,
                key=key,
                value=value,
                location=location,
            ):
                num_changed += 1

        if num_changed > 0:
            self._rebuild_vault_metadata()
            log.trace(f"Transposed metadata in {note.note_path}")

        return num_changed

    def update_from_dict(self, dictionary: dict[str, Any]) -> int:
        """Update note metadata from a dictionary. This method is used when updating note metadata from a CSV file. This is a destructive operation. All existing metadata in the specified notes not in the dictionary will be removed.

        Requires a dictionary with the note path as the key and a dictionary of metadata as the value. Each key must have a list of associated dictionaries in the following format:

            {
                'type': 'frontmatter|inline_metadata|tag',
                'key': 'string',
                'value': 'string'
            }

        Args:
            dictionary (dict[str, Any]): Dictionary to update metadata from.

        Returns:
            int: Number of notes that had metadata updated.
        """
        num_changed = 0

        for _note in self.notes:
            if _note.rename_inline_tag(old_tag, new_tag):
        for _note in self.all_notes:
            path = _note.note_path.relative_to(self.vault_path)
            if str(path) in dictionary:
                log.debug(f"Bulk update metadata for '{path}'")
                num_changed += 1

                # Delete all existing metadata in the note
                _note.delete_metadata(meta_type=MetadataType.META, key=r".*", is_regex=True)
                _note.delete_metadata(meta_type=MetadataType.TAGS, value=r".*", is_regex=True)

                # Add the new metadata
                for row in dictionary[str(path)]:
                    if row["type"].lower() == "frontmatter":
                        _note.add_metadata(
                            meta_type=MetadataType.FRONTMATTER,
                            added_key=row["key"],
                            added_value=row["value"],
                        )

                    if row["type"].lower() == "inline_metadata":
                        _note.add_metadata(
                            meta_type=MetadataType.INLINE,
                            added_key=row["key"],
                            added_value=row["value"],
                            location=self.insert_location,
                        )

                    if row["type"].lower() == "tag":
                        _note.add_metadata(
                            meta_type=MetadataType.TAGS,
                            added_value=row["value"],
                            location=self.insert_location,
                        )

        if num_changed > 0:
            self.metadata.rename(self.notes[0].inline_tags.metadata_key, old_tag, new_tag)
            self._rebuild_vault_metadata()

        return num_changed
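
# A sketch of the dictionary shape `update_from_dict` expects, matching the
# "path,type,key,value" columns written by `export_notes_to_csv` above.
# (Illustrative paths and values; `vault` is an existing Vault instance, and the
# CSV-to-dict plumbing lives elsewhere in the application.)
updates = {
    "projects/alpha.md": [
        {"type": "frontmatter", "key": "status", "value": "active"},
        {"type": "tag", "key": "", "value": "inbox"},
    ]
}
num_updated = vault.update_from_dict(updates)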
def write(self) -> None:
|
||||
"""Write changes to the vault."""
|
||||
log.debug("Writing changes to vault...")
|
||||
if self.dry_run is False:
|
||||
for _note in self.notes:
|
||||
log.trace(f"writing to {_note.note_path}")
|
||||
_note.write()
|
||||
|
||||
@@ -6,56 +6,87 @@ import pytest
|
||||
|
||||
from obsidian_metadata._utils import alerts
|
||||
from obsidian_metadata._utils.alerts import logger as log
|
||||
from tests.helpers import Regex
|
||||
from tests.helpers import Regex, strip_ansi
|
||||
|
||||
|
||||
def test_dryrun(capsys):
|
||||
"""Test dry run."""
|
||||
alerts.dryrun("This prints in dry run")
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "DRYRUN | This prints in dry run\n"
|
||||
captured = strip_ansi(capsys.readouterr().out)
|
||||
assert captured == "DRYRUN | This prints in dry run\n"
|
||||
|
||||
|
||||
def test_success(capsys):
|
||||
"""Test success."""
|
||||
alerts.success("This prints in success")
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == "SUCCESS | This prints in success\n"
|
||||
captured = strip_ansi(capsys.readouterr().out)
|
||||
assert captured == "SUCCESS | This prints in success\n"
|
||||
|
||||
|
||||
def test_error(capsys):
|
||||
"""Test success."""
|
||||
    alerts.error("This prints in error")
    captured = capsys.readouterr()
    assert captured.out == "ERROR | This prints in error\n"
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "ERROR | This prints in error\n"


def test_warning(capsys):
    """Test warning."""
    alerts.warning("This prints in warning")
    captured = capsys.readouterr()
    assert captured.out == "WARNING | This prints in warning\n"
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "WARNING | This prints in warning\n"


def test_notice(capsys):
    """Test notice."""
    alerts.notice("This prints in notice")
    captured = capsys.readouterr()
    assert captured.out == "NOTICE | This prints in notice\n"
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "NOTICE | This prints in notice\n"


def test_alerts_debug(capsys):
    """Test debug."""
    alerts.debug("This prints in debug")
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "DEBUG | This prints in debug\n"


def test_usage(capsys):
    """Test usage."""
    alerts.usage("This prints in usage")
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "USAGE | This prints in usage\n"

    alerts.usage(
        "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua",
        width=80,
    )
    captured = strip_ansi(capsys.readouterr().out)
    assert "USAGE | Lorem ipsum dolor sit amet" in captured
    assert " | incididunt ut labore et dolore magna aliqua" in captured

    alerts.usage(
        "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua",
        width=20,
    )
    captured = strip_ansi(capsys.readouterr().out)
    assert "USAGE | Lorem ipsum dolor" in captured
    assert " | sit amet," in captured
    assert " | adipisicing elit," in captured


def test_info(capsys):
    """Test info."""
    alerts.info("This prints in info")
    captured = capsys.readouterr()
    assert captured.out == "INFO | This prints in info\n"
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "INFO | This prints in info\n"


def test_dim(capsys):
"""Test info."""
|
||||
    alerts.dim("This prints in dim")
    captured = capsys.readouterr()
    assert captured.out == "This prints in dim\n"
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "This prints in dim\n"


@pytest.mark.parametrize(
@@ -75,74 +106,74 @@ def test_logging(capsys, tmp_path, verbosity, log_to_file) -> None:

    if verbosity >= 3:
        assert logging.is_trace() is True
        captured = capsys.readouterr()
        assert captured.out == ""
        captured = strip_ansi(capsys.readouterr().out)
        assert not captured

        assert logging.is_trace("trace text") is True
        captured = capsys.readouterr()
        assert captured.out == "trace text\n"
        captured = strip_ansi(capsys.readouterr().out)
        assert captured == "trace text\n"

        log.trace("This is Trace logging")
        captured = capsys.readouterr()
        assert captured.err == Regex(r"^TRACE \| This is Trace logging \([\w\._:]+:\d+\)$")
        cap_error = strip_ansi(capsys.readouterr().err)
        assert cap_error == Regex(r"^TRACE \| This is Trace logging \([\w\._:]+:\d+\)$")
    else:
        assert logging.is_trace("trace text") is False
        captured = capsys.readouterr()
        assert captured.out != "trace text\n"
        captured = strip_ansi(capsys.readouterr().out)
        assert captured != "trace text\n"

        log.trace("This is Trace logging")
        captured = capsys.readouterr()
        assert captured.err != Regex(r"^TRACE \| This is Trace logging \([\w\._:]+:\d+\)$")
        cap_error = strip_ansi(capsys.readouterr().err)
        assert cap_error != Regex(r"^TRACE \| This is Trace logging \([\w\._:]+:\d+\)$")

    if verbosity >= 2:
        assert logging.is_debug() is True
        captured = capsys.readouterr()
        assert captured.out == ""
        captured = strip_ansi(capsys.readouterr().out)
        assert not captured

        assert logging.is_debug("debug text") is True
        captured = capsys.readouterr()
        assert captured.out == "debug text\n"
        captured = strip_ansi(capsys.readouterr().out)
        assert captured == "debug text\n"

        log.debug("This is Debug logging")
        captured = capsys.readouterr()
        assert captured.err == Regex(r"^DEBUG \| This is Debug logging \([\w\._:]+:\d+\)$")
        captured = strip_ansi(capsys.readouterr().err)
        assert captured == Regex(r"^DEBUG \| This is Debug logging \([\w\._:]+:\d+\)$")
    else:
        assert logging.is_debug("debug text") is False
        captured = capsys.readouterr()
        assert captured.out != "debug text\n"
        captured = strip_ansi(capsys.readouterr().out)
        assert captured != "debug text\n"

        log.debug("This is Debug logging")
        captured = capsys.readouterr()
        assert captured.err != Regex(r"^DEBUG \| This is Debug logging \([\w\._:]+:\d+\)$")
        captured = strip_ansi(capsys.readouterr().err)
        assert captured != Regex(r"^DEBUG \| This is Debug logging \([\w\._:]+:\d+\)$")

    if verbosity >= 1:
        assert logging.is_info() is True
        captured = capsys.readouterr()
        assert captured.out == ""
        captured = strip_ansi(capsys.readouterr().out)
        assert not captured

        assert logging.is_info("info text") is True
        captured = capsys.readouterr()
        assert captured.out == "info text\n"
        captured = strip_ansi(capsys.readouterr().out)
        assert captured == "info text\n"

        log.info("This is Info logging")
        captured = capsys.readouterr()
        assert captured.err == "INFO | This is Info logging\n"
        captured = strip_ansi(capsys.readouterr().err)
        assert captured == "INFO | This is Info logging\n"
    else:
        assert logging.is_info("info text") is False
        captured = capsys.readouterr()
        assert captured.out != "info text\n"
        captured = strip_ansi(capsys.readouterr().out)
        assert captured != "info text\n"

        log.info("This is Info logging")
        captured = capsys.readouterr()
        assert captured.out == ""
        captured = strip_ansi(capsys.readouterr().out)
        assert not captured

    assert logging.is_default() is True
    captured = capsys.readouterr()
    assert captured.out == ""
    captured = strip_ansi(capsys.readouterr().out)
    assert not captured

    assert logging.is_default("default text") is True
    captured = capsys.readouterr()
    assert captured.out == "default text\n"
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == "default text\n"

    if log_to_file:
        assert tmp_log.exists() is True

@@ -3,412 +3,681 @@

How mocking works in this test suite:

1. The main_app() method is mocked using a side effect iterable. This allows us to pass a value in the first run, and then a KeyError in the second run to exit the loop.
1. The application_main() method is mocked using a side effect iterable. This allows us to pass a value in the first run, and then a KeyError in the second run to exit the loop.
2. All questions are mocked using return_value. This allows us to pass in a value to the question and then the method will return that value. This is useful for testing questionary prompts without user input.
"""

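Concretely, the two points above combine into the pattern used by nearly every test that follows; this condensed sketch mirrors the structure of the real tests below ("add_metadata" is just one of the menu values they use):

def test_example(test_application, mocker):
    app = test_application
    app._load_vault()
    # First call returns a menu choice; the second raises KeyError to break the main loop
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["add_metadata", KeyError],
    )
    # Questions are stubbed with fixed answers instead of interactive input
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_key",
        return_value="new_key",
    )
    with pytest.raises(KeyError):
        app.application_main()
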
import re
from pathlib import Path

import pytest

from tests.helpers import Regex
from obsidian_metadata.models.enums import MetadataType
from tests.helpers import Regex, strip_ansi


def test_instantiate_application(test_application) -> None:
    """Test application."""
    """Test application.

    GIVEN an application
    WHEN the application is instantiated
    THEN check the attributes are set correctly
    """
    app = test_application
    app.load_vault()
    app._load_vault()

    assert app.dry_run is False
    assert app.config.name == "command_line_vault"
    assert app.config.exclude_paths == [".git", ".obsidian"]
    assert app.dry_run is False
    assert app.vault.num_notes() == 13
    assert len(app.vault.all_notes) == 13


def test_abort(test_application, mocker, capsys) -> None:
    """Test renaming a key."""
    """Test aborting the application.

    GIVEN an application
    WHEN the user selects "abort" from the main menu
    THEN check the application exits
    """
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        "obsidian_metadata.models.application.Questions.ask_application_main",
        return_value="abort",
    )

    app.main_app()
    captured = capsys.readouterr()
    assert "Vault Info" in captured.out
    assert "Done!" in captured.out
    app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "Done!" in captured


def test_list_notes(test_application, mocker, capsys) -> None:
    """Test renaming a key."""
def test_add_metadata_frontmatter(test_application, mocker, capsys) -> None:
    """Test adding new metadata to the vault.

    GIVEN an application
    WHEN the user wants to update a key in the frontmatter
    THEN check the application updates the key
    """
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["list_notes", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["add_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_meta_type",
        return_value=MetadataType.FRONTMATTER,
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_key",
        return_value="new_key",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_value",
        return_value="new_key_value",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert "04 no metadata/no_metadata_1.md" in captured.out
    assert "02 inline/inline 2.md" in captured.out
    assert "+inbox/Untitled.md" in captured.out
    assert "00 meta/templates/data sample.md" in captured.out
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS +\| Added metadata to \d+ notes", re.DOTALL)


def test_all_metadata(test_application, mocker, capsys) -> None:
    """Test renaming a key."""
def test_add_metadata_inline(test_application, mocker, capsys) -> None:
    """Test adding new metadata to the vault.

    GIVEN an application
    WHEN the user wants to add a key in the inline metadata
    THEN check the application updates the key
    """
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["all_metadata", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["add_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_meta_type",
        return_value=MetadataType.INLINE,
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_key",
        return_value="new_key",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_value",
        return_value="new_key_value",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    expected = re.escape("┃ Keys ┃ Values")
    assert captured.out == Regex(expected)
    expected = re.escape("Inline Tags │ breakfast")
    assert captured.out == Regex(expected)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS +\| Added metadata to \d+ notes", re.DOTALL)


def test_add_metadata_tag(test_application, mocker, capsys) -> None:
    """Test adding new metadata to the vault.

    GIVEN an application
    WHEN the user wants to add a tag
    THEN check the application adds the tag
    """
    app = test_application
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["add_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_meta_type",
        return_value=MetadataType.TAGS,
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_tag",
        return_value="new_tag",
    )

    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS +\| Added metadata to \d+ notes", re.DOTALL)


def test_delete_tag_1(test_application, mocker, capsys) -> None:
"""Test renaming an inline tag.
|
||||
|
||||
    GIVEN an application
    WHEN the user wants to delete an inline tag
    THEN check the application deletes the tag
    """
    app = test_application
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["delete_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_tag", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_tag",
        return_value="breakfast",
    )

    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS +\| Deleted inline tag: breakfast in \d+ notes", re.DOTALL)


def test_delete_tag_2(test_application, mocker, capsys) -> None:
"""Test renaming an inline tag.
|
||||
|
||||
    GIVEN an application
    WHEN the user wants to delete an inline tag that does not exist
    THEN check the application does not update any notes
    """
    app = test_application
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["delete_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_tag", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_tag",
        return_value="not_a_tag_in_vault",
    )

    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "WARNING | No notes were changed" in captured


def test_delete_key(test_application, mocker, capsys) -> None:
"""Test renaming an inline tag."""
|
||||
    app = test_application
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["delete_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_key", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_keys_regex",
        return_value=r"\d{7}",
    )

    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert r"WARNING | No notes found with a key matching regex: \d{7}" in captured

    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["delete_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_key", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_keys_regex",
        return_value=r"d\w+",
    )

    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS \| Deleted keys matching: d\\w\+ from \d+ notes", re.DOTALL)


def test_delete_value(test_application, mocker, capsys) -> None:
"""Test renaming an inline tag."""
|
||||
    app = test_application
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["delete_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_value", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="area",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_value_regex",
        return_value=r"\d{7}",
    )
    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert r"WARNING | No notes found matching: area: \d{7}" in captured

    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["delete_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_value", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="area",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_value_regex",
        return_value=r"^front\w+$",
    )
    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS | Deleted value \^front\\w\+\$ from key area in \d+ notes")


def test_filter_notes(test_application, mocker, capsys) -> None:
"""Test renaming a key."""
|
||||
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["filter_notes", "list_notes", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["filter_notes", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_filter_path",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["apply_path_filter", "list_notes", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_filter_path",
        return_value="inline",
    )

    with pytest.raises(KeyError):
        app.main_app()
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS +\| Loaded \d+ notes from \d+ total", re.DOTALL)
    assert "02 inline/inline 2.md" in captured
    assert "03 mixed/mixed 1.md" not in captured

    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["filter_notes", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["apply_metadata_filter", "list_notes", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="on_one_note",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_value",
        return_value="",
    )
    with pytest.raises(KeyError):
        app.application_main()
    captured = capsys.readouterr()
    assert "04 no metadata/no_metadata_1.md" not in captured.out
    assert "02 inline/inline 1.md" in captured.out
    assert captured.out == Regex(r"SUCCESS +\| Loaded.*1.*notes from.*\d+.*total", re.DOTALL)
    assert "02 inline/inline 2.md" in captured.out
    assert "+inbox/Untitled.md" not in captured.out
    assert "00 meta/templates/data sample.md" not in captured.out
    assert "03 mixed/mixed 1.md" not in captured.out


def test_rename_key_success(test_application, mocker, capsys) -> None:
    """Test renaming a key."""
def test_filter_clear(test_application, mocker, capsys) -> None:
    """Test clearing filters."""
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["rename_key", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["filter_notes", "filter_notes", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_key",
        return_value="tags",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["apply_metadata_filter", "list_filters", "list_notes", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_new_key",
        return_value="new_tags",
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="on_one_note",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_value",
        return_value="",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_number",
        return_value="1",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"Renamed.*tags.*to.*new_tags.*in.*\d+.*notes", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "02 inline/inline 2.md" in captured
    assert "03 mixed/mixed 1.md" in captured
    assert "01 frontmatter/frontmatter 4.md" in captured
    assert "04 no metadata/no_metadata_1.md " in captured


def test_rename_key_fail(test_application, mocker, capsys) -> None:
    """Test renaming a key."""
def test_inspect_metadata_all(test_application, mocker, capsys) -> None:
"""Test backing up a vault."""
|
||||
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["rename_key", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["inspect_metadata", KeyError],
    )

    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_key",
        return_value="tag",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["all_metadata", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_new_key",
        return_value="new_tags",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert "WARNING | No notes were changed" in captured.out
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"tags +│ bar ")
    assert captured == Regex(r"status +│ new ")
    assert captured == Regex(r"in_text_key +│ in-text value")
    assert "#breakfast" in captured


def test_rename_inline_tag_success(test_application, mocker, capsys) -> None:
def test_rename_tag(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["rename_inline_tag", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_inline_tag",
        return_value="breakfast",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_tag", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_new_tag",
        return_value="new_tag",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"Renamed.*breakfast.*to.*new_tag.*in.*\d+.*notes", re.DOTALL)


def test_rename_inline_tag_fail(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["rename_inline_tag", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_inline_tag",
        "obsidian_metadata.models.application.Questions.ask_existing_tag",
        return_value="not_a_tag",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_new_tag",
        "obsidian_metadata.models.application.Questions.ask_new_tag",
        return_value="new_tag",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"WARNING +\| No notes were changed", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "No notes were changed" in captured


def test_delete_inline_tag_success(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_inline_tag", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_inline_tag",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_tag", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_tag",
        return_value="breakfast",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_tag",
        return_value="new_tag",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"SUCCESS +\| Deleted.*\d+.*notes", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"Renamed breakfast to new_tag in \d+ notes", re.DOTALL)


def test_delete_inline_tag_fail(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
def test_rename_key(test_application, mocker, capsys) -> None:
    """Test renaming a key."""
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_inline_tag", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_inline_tag",
        return_value="not_a_tag_in_vault",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_key", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="tag",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_key",
        return_value="new_tags",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"WARNING +\| No notes were changed", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "WARNING | No notes were changed" in captured


def test_delete_key_success(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_key", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_keys_regex",
        return_value=r"d\w+",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_key", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="tags",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_key",
        return_value="new_tags",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(
        r"SUCCESS +\|.*Deleted.*keys.*matching:.*d\\w\+.*from.*10", re.DOTALL
    )


def test_delete_key_fail(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_key", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_keys_regex",
        return_value=r"\d{7}",
    )

    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"WARNING +\| No notes found with a.*key.*matching", re.DOTALL)


def test_rename_value_success(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["rename_value", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_key",
        return_value="area",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_value",
        return_value="frontmatter",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_new_value",
        return_value="new_key",
    )
    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(
        r"SUCCESS | Renamed 'area:frontmatter' to 'area:new_key'", re.DOTALL
    )
    assert captured.out == Regex(r".*in.*\d+.*notes.*", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"Renamed tags to new_tags in \d+ notes", re.DOTALL)


def test_rename_value_fail(test_application, mocker, capsys) -> None:
"""Test renaming an inline tag."""
|
||||
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["rename_value", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_key",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_value", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="area",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_value",
        "obsidian_metadata.models.application.Questions.ask_existing_value",
        return_value="not_exists",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_new_value",
        "obsidian_metadata.models.application.Questions.ask_new_value",
        return_value="new_key",
    )
    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"WARNING +\| No notes were changed", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "WARNING | No notes were changed" in captured


def test_delete_value_success(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_value", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_key",
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_value", "back"],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="area",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_value_regex",
        return_value=r"^front\w+$",
        "obsidian_metadata.models.application.Questions.ask_existing_value",
        return_value="frontmatter",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_new_value",
        return_value="new_key",
    )
    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(
        r"SUCCESS +\| Deleted value.*\^front\\w\+\$.*from.*key.*area.*in.*\d+.*notes", re.DOTALL
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(
        r"SUCCESS +\| Renamed 'area:frontmatter' to 'area:new_key' in \d+ notes", re.DOTALL
    )


def test_delete_value_fail(test_application, mocker, capsys) -> None:
    """Test renaming an inline tag."""
    app = test_application
    app.load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_value", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_key",
        return_value="area",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_value_regex",
        return_value=r"\d{7}",
    )
    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"WARNING +\| No notes found matching:", re.DOTALL)


def test_review_no_changes(test_application, mocker, capsys) -> None:
    """Review changes when no changes to vault."""
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["review_changes", KeyError],
    )
    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"INFO +\| No changes to review", re.DOTALL)
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert "INFO | No changes to review" in captured


def test_review_changes(test_application, mocker, capsys) -> None:
"""Review changes when no changes to vault."""
|
||||
    app = test_application
    app.load_vault()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_main_application",
        side_effect=["delete_key", "review_changes", KeyError],
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["rename_metadata", "review_changes", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_existing_keys_regex",
        return_value=r"d\w+",
        "obsidian_metadata.models.application.Questions.ask_existing_key",
        return_value="tags",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_confirm",
        return_value=True,
        "obsidian_metadata.models.application.Questions.ask_new_key",
        return_value="new_tags",
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_for_selection",
        side_effect=[1, "return"],
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["rename_key", 1, "return"],
    )
    with pytest.raises(KeyError):
        app.main_app()
    captured = capsys.readouterr()
    assert captured.out == Regex(r".*Found.*\d+.*changed notes in the vault.*", re.DOTALL)
    assert "- date_created: 2022-12-22" in captured.out
    assert "+ - breakfast" in captured.out
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r".*Found \d+ changed notes in the vault", re.DOTALL)
    assert "- tags:" in captured
    assert "+ new_tags:" in captured


def test_transpose_metadata_1(test_application, mocker, capsys) -> None:
    """Transpose metadata.

    GIVEN a test application
    WHEN the user wants to transpose all inline metadata to frontmatter
    THEN the metadata is transposed
    """
    app = test_application
    app._load_vault()

    assert app.vault.inline_meta["inline_key"] == ["inline_key_value"]
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["reorganize_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["inline_to_frontmatter", "transpose_all"],
    )
    with pytest.raises(KeyError):
        app.application_main()

    assert app.vault.inline_meta == {}
    assert app.vault.frontmatter["inline_key"] == ["inline_key_value"]
    captured = strip_ansi(capsys.readouterr().out)
    assert "SUCCESS | Transposed Inline Metadata to Frontmatter in 5 notes" in captured


def test_transpose_metadata_2(test_application, mocker) -> None:
    """Transpose metadata.

    GIVEN a test application
    WHEN the user wants to transpose all frontmatter to inline metadata
    THEN the metadata is transposed
    """
    app = test_application
    app._load_vault()

    assert app.vault.frontmatter["date_created"] == ["2022-12-21", "2022-12-22"]
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["reorganize_metadata", KeyError],
    )
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["frontmatter_to_inline", "transpose_all"],
    )
    with pytest.raises(KeyError):
        app.application_main()
    assert app.vault.inline_meta["date_created"] == ["2022-12-21", "2022-12-22"]
    assert app.vault.frontmatter == {}


def test_vault_backup(test_application, mocker, capsys) -> None:
    """Test backing up a vault."""
    app = test_application
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["vault_actions", KeyError],
    )

    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["backup_vault", "back"],
    )
    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(
        r"SUCCESS +\| Vault backed up to:[-\w\d\/\s]+application\.bak", re.DOTALL
    )


def test_vault_delete(test_application, mocker, capsys, tmp_path) -> None:
"""Test backing up a vault."""
|
||||
    app = test_application
    backup_path = Path(tmp_path / "application.bak")
    backup_path.mkdir()
    app._load_vault()
    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_application_main",
        side_effect=["vault_actions", KeyError],
    )

    mocker.patch(
        "obsidian_metadata.models.application.Questions.ask_selection",
        side_effect=["delete_backup", "back"],
    )
    with pytest.raises(KeyError):
        app.application_main()
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"SUCCESS +\| Backup deleted", re.DOTALL)

@@ -1,9 +1,13 @@
# type: ignore
"""Test obsidian-metadata CLI."""

import shutil
from pathlib import Path

from typer.testing import CliRunner

from obsidian_metadata.cli import app
from tests.helpers import Regex, strip_ansi

from .helpers import KeyInputs, Regex  # noqa: F401

@@ -14,19 +18,28 @@ def test_version() -> None:
    """Test printing version and then exiting."""
    result = runner.invoke(app, ["--version"])
    assert result.exit_code == 0
    assert result.output == Regex(r"obsidian_metadata: v\d+\.\d+\.\d+$")
    assert "obsidian_metadata: v" in result.output


def test_application(test_vault, tmp_path) -> None:
def test_application(tmp_path) -> None:
    """Test the application."""
    vault_path = test_vault
    source_dir = Path(__file__).parent / "fixtures" / "test_vault"
    dest_dir = Path(tmp_path / "vault")

    if not source_dir.exists():
        raise FileNotFoundError(f"Sample vault not found: {source_dir}")

    shutil.copytree(source_dir, dest_dir)

    config_path = tmp_path / "config.toml"
    result = runner.invoke(
        app,
        ["--vault-path", vault_path, "--config-file", config_path],
        ["--vault-path", dest_dir, "--config-file", config_path],
        # input=KeyInputs.DOWN + KeyInputs.DOWN + KeyInputs.DOWN + KeyInputs.ENTER,  # noqa: ERA001
    )

    output = strip_ansi(result.output)

    banner = r"""
 ___ _ _ _ _
/ _ \| |__ ___(_) __| (_) __ _ _ __
@@ -39,5 +52,28 @@ def test_application(test_vault, tmp_path) -> None:
|_| |_|\___|\__\__,_|\__,_|\__,_|\__\__,_|
"""

    assert banner in result.output
    assert banner in output
    assert output == Regex(r"SUCCESS \| Loaded \d+ notes from \d+ total notes")
    assert result.exit_code == 1


def test_export_template(tmp_path) -> None:
    """Test the export template command."""
    source_dir = Path(__file__).parent / "fixtures" / "test_vault"
    dest_dir = Path(tmp_path / "vault")

    if not source_dir.exists():
        raise FileNotFoundError(f"Sample vault not found: {source_dir}")

    shutil.copytree(source_dir, dest_dir)

    config_path = tmp_path / "config.toml"
    export_path = tmp_path / "export_template.csv"
    result = runner.invoke(
        app,
        ["--vault-path", dest_dir, "--config-file", config_path, "--export-template", export_path],
    )

    assert "SUCCESS | Exported metadata to" in result.output
    assert result.exit_code == 0
    assert export_path.exists()

@@ -36,7 +36,7 @@ def test_vault_path_errors(tmp_path, capsys) -> None:
    assert "Vault path not found" in captured.out

    with pytest.raises(typer.Exit):
        Config(config_path=config_file, vault_path=Path("tests/fixtures/sample_note.md"))
        Config(config_path=config_file, vault_path=Path("tests/fixtures/test_vault/sample_note.md"))
    captured = capsys.readouterr()
    assert "Vault path is not a directory" in captured.out

@@ -49,6 +49,7 @@ def test_multiple_vaults_okay() -> None:
    assert config.config == {
        "Sample Vault": {
            "exclude_paths": [".git", ".obsidian", "ignore_folder"],
            "insert_location": "top",
            "path": "tests/fixtures/sample_vault",
        },
        "Test Vault": {
@@ -74,6 +75,7 @@ def test_single_vault() -> None:
        "Test Vault": {
            "exclude_paths": [".git", ".obsidian", "ignore_folder"],
            "path": "tests/fixtures/test_vault",
            "insert_location": "BOTTOM",
        }
    }
    assert len(config.vaults) == 1
@@ -101,10 +103,19 @@ def test_no_config_no_vault(tmp_path, mocker) -> None:
    ["Vault 1"] # Name of the vault.

    # Path to your obsidian vault
    # Note for Windows users: Windows paths must use `\\` as the path separator due to a limitation with how TOML parses strings.
    # Example: "C:\\Users\\username\\Documents\\Obsidian"
    path = "{str(fake_vault)}"

    # Folders within the vault to ignore when indexing metadata
    exclude_paths = [".git", ".obsidian"]"""
    exclude_paths = [".git", ".obsidian"]

    # Location to add new metadata. One of:
    # TOP: Directly after frontmatter.
    # AFTER_TITLE: After the first header following frontmatter.
    # BOTTOM: The bottom of the note
    insert_location = "BOTTOM\"
    """

    assert config_file.exists() is True
    assert content == dedent(sample_config)
@@ -114,5 +125,6 @@ def test_no_config_no_vault(tmp_path, mocker) -> None:
        "Vault 1": {
            "path": str(fake_vault),
            "exclude_paths": [".git", ".obsidian"],
            "insert_location": "BOTTOM",
        }
    }

@@ -9,6 +9,13 @@ import pytest
from obsidian_metadata._config import Config
from obsidian_metadata.models.application import Application

CONFIG_1 = """
["Test Vault"]
exclude_paths = [".git", ".obsidian", "ignore_folder"]
insert_location = "TOP"
path = "TMPDIR_VAULT_PATH"
"""


def remove_all(root: Path):
    """Remove all files and directories in a directory."""
@@ -25,7 +32,7 @@ def remove_all(root: Path):
@pytest.fixture()
def sample_note(tmp_path) -> Path:
    """Fixture which creates a temporary note file."""
    source_file: Path = Path("tests/fixtures/test_vault/test1.md")
    source_file: Path = Path("tests/fixtures/test_vault/sample_note.md")
    if not source_file.exists():
        raise FileNotFoundError(f"Original file not found: {source_file}")

@@ -37,6 +44,33 @@ def sample_note(tmp_path) -> Path:
    dest_file.unlink()


@pytest.fixture()
def short_notes(tmp_path) -> Path:
    """Fixture which creates two temporary note files.

    Yields:
        Tuple[Path, Path]: Tuple of two temporary note files.
            1. Very short note with frontmatter
            2. Very short note without any frontmatter
    """
    source_file1: Path = Path("tests/fixtures/short_textfile.md")
    source_file2: Path = Path("tests/fixtures/no_metadata.md")
    if not source_file1.exists():
        raise FileNotFoundError(f"Original file not found: {source_file1}")
    if not source_file2.exists():
        raise FileNotFoundError(f"Original file not found: {source_file2}")

    dest_file1: Path = Path(tmp_path / source_file1.name)
    dest_file2: Path = Path(tmp_path / source_file2.name)
    shutil.copy(source_file1, dest_file1)
    shutil.copy(source_file2, dest_file2)
    yield dest_file1, dest_file2

    # after test - remove fixtures
    dest_file1.unlink()
    dest_file2.unlink()


@pytest.fixture()
def sample_vault(tmp_path) -> Path:
    """Fixture which creates a sample vault."""
@@ -68,10 +102,16 @@ def test_vault(tmp_path) -> Path:
        raise FileNotFoundError(f"Sample vault not found: {source_dir}")

    shutil.copytree(source_dir, dest_dir)
    yield dest_dir
    config_path = Path(tmp_path / "config.toml")
    config_path.write_text(CONFIG_1.replace("TMPDIR_VAULT_PATH", str(dest_dir)))
    config = Config(config_path=config_path)
    vault_config = config.vaults[0]

    yield vault_config

    # after test - remove fixtures
    shutil.rmtree(dest_dir)
    config_path.unlink()

    if backup_dir.exists():
        shutil.rmtree(backup_dir)

44 tests/fixtures/CP1250.md vendored Normal file
@@ -0,0 +1,44 @@
---
date_created: 2022-12-22 # confirm dates are translated to strings
tags:
  - foo
  - bar
frontmatter1: foo
frontmatter2: ["bar", "baz", "qux"]
??: ??
# Nested lists are not supported
# invalid:
#   invalid:
#     - invalid
#     - invalid2
french1: "Voix ambiguë d'un cour qui, au zéphyr, préfère les jattes de kiwis"
---

# Heading 1

inline1:: foo
inline1::bar baz
**inline2**:: [[foo]]
_inline3_:: value
??::??
key with space:: foo
french2:: Voix ambiguë d'un cour qui, au zéphyr, préfère les jattes de kiwis.

> inline4:: foo

inline5::

foo bar [intext1:: foo] baz `#invalid` qux (intext2:: foo) foobar. #tag1 Foo bar #tag2 baz qux. [[link]]

The quick brown fox jumped over the lazy dog.

# tag3

---

## invalid: invalid

```python
invalid:: invalid
#invalid
```
5 tests/fixtures/multiple_vaults.toml vendored
@@ -1,6 +1,7 @@
["Sample Vault"]
exclude_paths = [".git", ".obsidian", "ignore_folder"]
path = "tests/fixtures/sample_vault"
exclude_paths = [".git", ".obsidian", "ignore_folder"]
insert_location = "top"
path = "tests/fixtures/sample_vault"
["Test Vault"]
exclude_paths = [".git", ".obsidian", "ignore_folder"]
path = "tests/fixtures/test_vault"

1 tests/fixtures/no_metadata.md vendored Normal file
@@ -0,0 +1 @@
Lorem ipsum dolor sit amet.
39 tests/fixtures/sample_note.md vendored
@@ -1,39 +0,0 @@
|
||||
---
|
||||
date_created: 2022-12-22
|
||||
tags:
|
||||
- food/fruit/apple
|
||||
- dinner
|
||||
- breakfast
|
||||
- not_food
|
||||
author: John Doe
|
||||
nested_list:
|
||||
nested_list_one:
|
||||
- nested_list_one_a
|
||||
- nested_list_one_b
|
||||
type:
|
||||
- article
|
||||
- note
|
||||
---
|
||||
|
||||
area:: mixed
|
||||
date_modified:: 2022-12-22
|
||||
status:: new
|
||||
type:: book
|
||||
inline_key:: inline_key_value
|
||||
type:: [[article]]
|
||||
tags:: from_inline_metadata
|
||||
**bold_key**:: **bold** key value
|
||||
|
||||
|
||||
|
||||
|
||||
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
|
||||
|
||||
Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, [in_text_key:: in-text value] eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? #inline_tag
|
||||
|
||||
At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, #inline_tag2 cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.
|
||||
|
||||
#food/fruit/pear
|
||||
#food/fruit/orange
|
||||
#dinner #breakfast
|
||||
#brunch
|
||||
@@ -3,25 +3,16 @@ area: frontmatter
|
||||
date_created: 2022-12-22
|
||||
date_modified: 2022-12-22
|
||||
tags:
|
||||
- food/fruit/apple
|
||||
- food/fruit/pear
|
||||
- dinner
|
||||
- lunch
|
||||
- breakfast
|
||||
thoughts:
|
||||
rating: 8
|
||||
reviewable: false
|
||||
levels:
|
||||
level1:
|
||||
- level1a
|
||||
- level1b
|
||||
level2:
|
||||
- level2a
|
||||
- level2b
|
||||
- food/fruit/apple
|
||||
- food/fruit/pear
|
||||
- dinner
|
||||
- lunch
|
||||
- breakfast
|
||||
author: John Doe
|
||||
status: new
|
||||
type: ["book", "article", "note", "one-off"]
|
||||
---
|
||||
|
||||
# Page Title H1
|
||||
|
||||
# Headings
|
||||
|
||||
@@ -3,25 +3,16 @@ area: frontmatter
date_created: 2022-12-22
date_modified: 2022-11-14
tags:
  - food/fruit/apple
  - food/fruit/pear
  - dinner
  - lunch
  - breakfast
thoughts:
  rating: 8
  reviewable: false
levels:
  level1:
    - level1a
    - level1b
  level2:
    - level2a
    - level2b
  - food/fruit/apple
  - food/fruit/pear
  - dinner
  - lunch
  - breakfast
author: John Doe
status: new
type: ["book", "article", "note"]
---

# Page Title H1

# Headings
@@ -3,25 +3,16 @@ area: frontmatter
date_created: 2022-12-22
date_modified: 2022-10-01
tags:
  - food/fruit/apple
  - food/fruit/pear
  - dinner
  - lunch
  - breakfast
thoughts:
  rating: 8
  reviewable: false
levels:
  level1:
    - level1a
    - level1b
  level2:
    - level2a
    - level2b
  - food/fruit/apple
  - food/fruit/pear
  - dinner
  - lunch
  - breakfast
author: John Doe
status: new
type: ["book", "article", "note"]
---

# Page Title H1

# Headings
@@ -3,21 +3,11 @@ area: frontmatter
date_created: 2022-12-22
date_modified: 2022-12-22
tags:
  - food/fruit/apple
  - food/fruit/pear
  - dinner
  - lunch
  - breakfast
thoughts:
  rating: 8
  reviewable: false
levels:
  level1:
    - level1a
    - level1b
  level2:
    - level2a
    - level2b
  - food/fruit/apple
  - food/fruit/pear
  - dinner
  - lunch
  - breakfast
author: John Doe
status: new
type: ["book", "article", "note"]
@@ -1,4 +1,3 @@

area:: frontmatter
date_created:: 2022-12-22
date_modified:: 2022-12-22
@@ -6,13 +5,17 @@ author:: John Doe
status:: new
type:: book
type:: article
#food/fruit/apple
#food/fruit/pear
#dinner #lunch #breakfast
on_one_note:: one
#food/fruit/apple
#food/fruit/pear
#dinner #lunch #breakfast

# note header

Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?

At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.
### header 3

At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.
@@ -1,3 +1,4 @@
# Header 1

area:: frontmatter
date_created:: 2022-12-22
@@ -6,13 +7,16 @@ author:: John Doe
status:: new
type:: book
type:: article
#food/fruit/apple
#food/fruit/pear
#dinner #lunch #breakfast
#food/fruit/apple
#food/fruit/pear
#dinner #lunch #breakfast

## Header 2

Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?

At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.
### Header 3

At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.
23  tests/fixtures/sample_vault/03 mixed/mixed 1.md  vendored
@@ -1,18 +1,14 @@
---
date_created: 2022-12-22
tags:
  - food/fruit/apple
  - dinner
  - breakfast
  - not_food
  - food/fruit/apple
  - dinner
  - breakfast
  - not_food
author: John Doe
nested_list:
  nested_list_one:
    - nested_list_one_a
    - nested_list_one_b
type:
  - article
  - note
  - article
  - note
---

area:: mixed
@@ -24,13 +20,16 @@ type:: [[article]]
tags:: from_inline_metadata
**bold_key**:: **bold** key value


# Note header

Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

## Header 2

Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, [in_text_key:: in-text value] eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? #inline_tag

### header 3

At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, #inline_tag2 cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.

#food/fruit/pear
7  tests/fixtures/short_textfile.md  vendored  Normal file
@@ -0,0 +1,7 @@
---
key: value
---

# header 1

Lorem ipsum dolor sit amet.
44  tests/fixtures/test_vault/sample_note.md  vendored  Normal file
@@ -0,0 +1,44 @@
---
date_created: 2022-12-22 # confirm dates are translated to strings
tags:
  - foo
  - bar
frontmatter1: foo
frontmatter2: ["bar", "baz", "qux"]
🌱: 🌿
# Nested lists are not supported
# invalid:
#   invalid:
#     - invalid
#     - invalid2
french1: "Voix ambiguë d'un cœur qui, au zéphyr, préfère les jattes de kiwis"
---

# Heading 1

inline1:: foo
inline1::bar baz
**inline2**:: [[foo]]
_inline3_:: value
🌱::🌿
key with space:: foo
french2:: Voix ambiguë d'un cœur qui, au zéphyr, préfère les jattes de kiwis.

> inline4:: foo

inline5::

foo bar [intext1:: foo] baz `#invalid` qux (intext2:: foo) foobar. #tag1 Foo bar #tag2 baz qux. [[link]]

The quick brown fox jumped over the lazy dog.

# tag3

---

## invalid: invalid

```python
invalid:: invalid
#invalid
```
44  tests/fixtures/test_vault/test1.md  vendored
@@ -1,44 +0,0 @@
---
date_created: 2022-12-22
tags:
  - shared_tag
  - frontmatter_tag1
  - frontmatter_tag2
  -
  - 📅/frontmatter_tag3
frontmatter_Key1: author name
frontmatter_Key2: ["article", "note"]
shared_key1: shared_key1_value
shared_key2: shared_key2_value1
---

#inline_tag_top1 #inline_tag_top2

top_key1:: top_key1_value
**top_key2:: top_key2_value**
top_key3:: [[top_key3_value_as_link]]
shared_key1:: shared_key1_value
shared_key2:: shared_key2_value2
emoji_📅_key:: emoji_📅_key_value

# Heading 1
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. #intext_tag1 Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu [intext_key:: intext_value] fugiat nulla (#intext_tag2) pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est lab

```python
#ffffff
# This is sample text with tags and metadata
#in_codeblock_tag1
#ffffff;
codeblock_key:: some text
in_codeblock_key:: in_codeblock_value
The quick brown fox jumped over the #in_codeblock_tag2
```

Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab `this is #inline_code_tag1` illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? `this is #inline_code_tag2` Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pari

bottom_key1:: bottom_key1_value
bottom_key2:: bottom_key2_value

#inline_tag_bottom1
#inline_tag_bottom2
#shared_tag
5  tests/fixtures/test_vault_config.toml  vendored
@@ -1,3 +1,4 @@
["Test Vault"]
exclude_paths = [".git", ".obsidian", "ignore_folder"]
path = "tests/fixtures/test_vault"
exclude_paths = [".git", ".obsidian", "ignore_folder"]
insert_location = "BOTTOM"
path = "tests/fixtures/test_vault"
@@ -22,6 +22,19 @@ class KeyInputs:
    THREE = "3"


def strip_ansi(text) -> str:
    """Remove ANSI escape sequences from a string.

    Args:
        text (str): String to remove ANSI escape sequences from.

    Returns:
        str: String without ANSI escape sequences.
    """
    ansi_chars = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
    return ansi_chars.sub("", text)
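
A quick illustration of what this new helper does (the snippet below is editorial, not part of the diff):

```python
# Illustrative usage of strip_ansi() from tests/helpers.py.
from tests.helpers import strip_ansi

colored = "\x1b[31mERROR\x1b[0m: something failed"
assert strip_ansi(colored) == "ERROR: something failed"
```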


class Regex:
    """Assert that a given string meets some expectations.
@@ -1,491 +1,209 @@
# type: ignore
"""Test metadata.py."""
from pathlib import Path
"""Test the InlineField class."""

from obsidian_metadata.models.metadata import (
    Frontmatter,
    InlineMetadata,
    InlineTags,
    VaultMetadata,
import pytest

from obsidian_metadata.models.enums import MetadataType, Wrapping
from obsidian_metadata.models.metadata import InlineField, dict_to_yaml


def test_dict_to_yaml_1():
    """Test dict_to_yaml() function.

    GIVEN a dictionary
    WHEN values contain lists
    THEN confirm the output is not sorted
    """
    test_dict = {"k2": ["v1", "v2"], "k1": ["v1", "v2"]}
    assert dict_to_yaml(test_dict) == "k2:\n  - v1\n  - v2\nk1:\n  - v1\n  - v2\n"


def test_dict_to_yaml_2():
    """Test dict_to_yaml() function.

    GIVEN a dictionary
    WHEN values contain lists and sort_keys is True
    THEN confirm the output is sorted
    """
    test_dict = {"k2": ["v1", "v2"], "k1": ["v1", "v2"]}
    assert dict_to_yaml(test_dict, sort_keys=True) == "k1:\n  - v1\n  - v2\nk2:\n  - v1\n  - v2\n"


def test_dict_to_yaml_3():
    """Test dict_to_yaml() function.

    GIVEN a dictionary
    WHEN values contain a list with a single value
    THEN confirm single-value lists are converted to strings
    """
    test_dict = {"k2": ["v1"], "k1": ["v1", "v2"]}
    assert dict_to_yaml(test_dict, sort_keys=True) == "k1:\n  - v1\n  - v2\nk2: v1\n"
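
Taken together, these three tests fix dict_to_yaml()'s contract: insertion order is kept by default, sort_keys=True sorts the keys, and single-value lists collapse to plain scalars. A minimal behavioral sketch, assuming a PyYAML-style dump (not the project's actual implementation, whose list indentation may be configured differently):

```python
# Behavioral sketch of dict_to_yaml(), inferred from the tests above.
import yaml


def dict_to_yaml_sketch(metadata: dict, sort_keys: bool = False) -> str:
    # Single-value lists collapse to plain scalars (test_dict_to_yaml_3).
    collapsed = {
        key: values[0] if isinstance(values, list) and len(values) == 1 else values
        for key, values in metadata.items()
    }
    # Insertion order is preserved unless sort_keys=True (tests 1 and 2).
    return yaml.dump(collapsed, sort_keys=sort_keys, default_flow_style=False)
```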
def test_init_1():
    """Test creating an InlineField object.

    GIVEN an inline tag
    WHEN an InlineField object is created
    THEN confirm the object's attributes match the expected values
    """
    obj = InlineField(
        meta_type=MetadataType.TAGS,
        key=None,
        value="tag1",
    )
    assert obj.meta_type == MetadataType.TAGS
    assert obj.key is None
    assert obj.value == "tag1"
    assert obj.normalized_value == "tag1"
    assert obj.wrapping == Wrapping.NONE
    assert obj.clean_key is None
    assert obj.normalized_key is None
    assert not obj.key_open
    assert not obj.key_close
    assert obj.is_changed is False


def test_init_2():
    """Test creating an InlineField object.

    GIVEN an inline key/value pair
    WHEN an InlineField object is created
    THEN confirm the object's attributes match the expected values
    """
    obj = InlineField(meta_type=MetadataType.INLINE, key="key", value="value")
    assert obj.meta_type == MetadataType.INLINE
    assert obj.key == "key"
    assert obj.value == "value"
    assert obj.normalized_value == "value"
    assert obj.wrapping == Wrapping.NONE
    assert obj.clean_key == "key"
    assert obj.normalized_key == "key"
    assert not obj.key_open
    assert not obj.key_close
    assert obj.is_changed is False

    obj = InlineField(
        meta_type=MetadataType.INLINE,
        key="key",
        value="value",
        wrapping=Wrapping.PARENS,
    )
    assert obj.meta_type == MetadataType.INLINE
    assert obj.key == "key"
    assert obj.value == "value"
    assert obj.normalized_value == "value"
    assert obj.wrapping == Wrapping.PARENS
    assert obj.clean_key == "key"
    assert obj.normalized_key == "key"
    assert not obj.key_open
    assert not obj.key_close
    assert obj.is_changed is False

    obj = InlineField(
        meta_type=MetadataType.INLINE,
        key="**key**",
        value="value",
        wrapping=Wrapping.BRACKETS,
    )
    assert obj.meta_type == MetadataType.INLINE
    assert obj.key == "**key**"
    assert obj.value == "value"
    assert obj.normalized_value == "value"
    assert obj.wrapping == Wrapping.BRACKETS
    assert obj.clean_key == "key"
    assert obj.normalized_key == "key"
    assert obj.key_open == "**"
    assert obj.key_close == "**"
    assert obj.is_changed is False
@pytest.mark.parametrize(
    (
        "original",
        "cleaned",
        "normalized",
        "key_open",
        "key_close",
    ),
    [
        ("foo", "foo", "foo", "", ""),
        ("🌱/🌿", "🌱/🌿", "🌱/🌿", "", ""),
        ("FOO 1", "FOO 1", "foo-1", "", ""),
        ("**key foo**", "key foo", "key-foo", "**", "**"),
        ("## KEY", "KEY", "key", "## ", ""),
    ],
)
from tests.helpers import Regex
def test_init_3(original, cleaned, normalized, key_open, key_close):
    """Test creating an InlineField object.

FILE_CONTENT: str = Path("tests/fixtures/test_vault/test1.md").read_text()
METADATA: dict[str, list[str]] = {
    "frontmatter_Key1": ["author name"],
    "frontmatter_Key2": ["note", "article"],
    "shared_key1": ["shared_key1_value"],
    "shared_key2": ["shared_key2_value"],
    "tags": ["tag 2", "tag 1", "tag 3"],
    "top_key1": ["top_key1_value"],
    "top_key2": ["top_key2_value"],
    "top_key3": ["top_key3_value"],
    "intext_key": ["intext_key_value"],
}
FRONTMATTER_CONTENT: str = """
---
tags:
  - tag_1
  - tag_2
  -
  - 📅/tag_3
frontmatter_Key1: "frontmatter_Key1_value"
frontmatter_Key2: ["note", "article"]
shared_key1: "shared_key1_value"
---
more content

---
horizontal: rule
---
"""
INLINE_CONTENT = """\
repeated_key:: repeated_key_value1

#inline_tag_top1,#inline_tag_top2
**bold_key1**:: bold_key1_value
**bold_key2:: bold_key2_value**
link_key:: [[link_key_value]]
tag_key:: #tag_key_value
emoji_📅_key:: emoji_📅_key_value
**#bold_tag**

Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. [in_text_key1:: in_text_key1_value] Ut enim ad minim veniam, quis nostrud exercitation [in_text_key2:: in_text_key2_value] ullamco laboris nisi ut aliquip ex ea commodo consequat. #in_text_tag Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

```python
#ffffff
# This is sample text [no_key:: value]with tags and metadata
#in_codeblock_tag1
#ffffff;
in_codeblock_key:: in_codeblock_value
The quick brown fox jumped over the #in_codeblock_tag2
```
repeated_key:: repeated_key_value2
"""
    GIVEN an InlineField object is created
    WHEN the key needs to be normalized
    THEN confirm clean_key() returns the expected value
    """
    obj = InlineField(meta_type=MetadataType.INLINE, key=original, value="value")
    assert obj.clean_key == cleaned
    assert obj.normalized_key == normalized
    assert obj.key_open == key_open
    assert obj.key_close == key_close


def test_vault_metadata(capsys) -> None:
    """Test VaultMetadata class."""
    vm = VaultMetadata()
    assert vm.dict == {}
@pytest.mark.parametrize(
    ("original", "normalized"),
    [("foo", "foo"), ("🌱/🌿", "🌱/🌿"), (" value ", "value"), ("  ", "-"), ("", "-")],
)
def test_init_4(original, normalized):
    """Test creating an InlineField object.

    vm.add_metadata(METADATA)
    assert vm.dict == {
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "note"],
        "intext_key": ["intext_key_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value"],
        "tags": ["tag 1", "tag 2", "tag 3"],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value"],
    }

    vm.print_keys()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"frontmatter_Key1 +frontmatter_Key2 +intext_key")

    vm.print_tags()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"tag 1 +tag 2 +tag 3")

    vm.print_metadata()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"┃ Keys +┃ Values +┃")
    assert captured.out == Regex(r"│ +│ tag 3 +│")
    assert captured.out == Regex(r"│ frontmatter_Key1 +│ author name +│")

    new_metadata = {"added_key": ["added_value"], "frontmatter_Key2": ["new_value"]}
    vm.add_metadata(new_metadata)
    assert vm.dict == {
        "added_key": ["added_value"],
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "new_value", "note"],
        "intext_key": ["intext_key_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value"],
        "tags": ["tag 1", "tag 2", "tag 3"],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value"],
    }
    GIVEN an InlineField object is created
    WHEN the value needs to be normalized
    THEN create the normalized_value attribute
    """
    obj = InlineField(meta_type=MetadataType.INLINE, key="key", value=original)
    assert obj.value == original
    assert obj.normalized_value == normalized
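
Between them, test_init_3 and test_init_4 pin down the normalization rules: markdown wrappers (**, _, a leading "## ") split off into key_open/key_close, cleaned keys are lowercased with whitespace collapsed to dashes, and values are stripped, with empty or all-whitespace values normalizing to "-". A rough reconstruction consistent with those parametrized cases (hypothetical helper names, not the project's code):

```python
# Hypothetical reconstruction of the normalization rules exercised above.
import re


def split_wrappers(key: str) -> tuple[str, str, str]:
    # "**key foo**" -> ("**", "key foo", "**"); "## KEY" -> ("## ", "KEY", "")
    match = re.match(r"^(?P<open>[*_#]+ ?)?(?P<key>[^*_]+?)(?P<close>[*_]+)?$", key)
    return match["open"] or "", match["key"], match["close"] or ""


def normalize(text: str) -> str:
    # "FOO 1" -> "foo-1"; " value " -> "value"; "" or "   " -> "-"
    stripped = text.strip()
    return re.sub(r"\s", "-", stripped.lower()) if stripped else "-"
```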
def test_vault_metadata_contains() -> None:
    """Test contains method."""
    vm = VaultMetadata()
    vm.add_metadata(METADATA)
    assert vm.dict == {
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "note"],
        "intext_key": ["intext_key_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value"],
        "tags": ["tag 1", "tag 2", "tag 3"],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value"],
    }
def test_inline_field_init_5():
    """Test updating the is_changed attribute.

    assert vm.contains("frontmatter_Key1") is True
    assert vm.contains("frontmatter_Key2", "article") is True
    assert vm.contains("frontmatter_Key3") is False
    assert vm.contains("frontmatter_Key2", "no value") is False
    assert vm.contains("1$", is_regex=True) is True
    assert vm.contains("5$", is_regex=True) is False
    assert vm.contains("tags", r"\d", is_regex=True) is True
    assert vm.contains("tags", r"^\d", is_regex=True) is False
    GIVEN creating an object
    WHEN is_changed is set to True at init
    THEN confirm is_changed is True
    """
    obj = InlineField(meta_type=MetadataType.TAGS, key="key", value="tag1", is_changed=True)
    assert obj.is_changed is True


def test_vault_metadata_delete() -> None:
    """Test delete method."""
    vm = VaultMetadata()
    vm.add_metadata(METADATA)
    assert vm.dict == {
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "note"],
        "intext_key": ["intext_key_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value"],
        "tags": ["tag 1", "tag 2", "tag 3"],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value"],
    }
def test_inline_field_init_6():
    """Test updating the is_changed attribute.

    assert vm.delete("no key") is False
    assert vm.delete("tags", "no value") is False
    assert vm.delete("tags", "tag 2") is True
    assert vm.dict["tags"] == ["tag 1", "tag 3"]
    assert vm.delete("tags") is True
    assert "tags" not in vm.dict
    GIVEN creating an object
    WHEN is_changed is set to True after init
    THEN confirm is_changed is True
    """
    obj = InlineField(meta_type=MetadataType.TAGS, key="key", value="tag1", is_changed=False)
    assert obj.is_changed is False
    obj.is_changed = True
    assert obj.is_changed is True


def test_vault_metadata_rename() -> None:
    """Test rename method."""
    vm = VaultMetadata()
    vm.add_metadata(METADATA)
    assert vm.dict == {
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "note"],
        "intext_key": ["intext_key_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value"],
        "tags": ["tag 1", "tag 2", "tag 3"],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value"],
    }
def test_inline_field_init_4():
    """Test updating the is_changed attribute.

    assert vm.rename("no key", "new key") is False
    assert vm.rename("tags", "no tag", "new key") is False
    assert vm.rename("tags", "tag 2", "new tag") is True
    assert vm.dict["tags"] == ["new tag", "tag 1", "tag 3"]
    assert vm.rename("tags", "old_tags") is True
    assert vm.dict["old_tags"] == ["new tag", "tag 1", "tag 3"]
    assert "tags" not in vm.dict
def test_frontmatter_create() -> None:
    """Test frontmatter creation."""
    frontmatter = Frontmatter(INLINE_CONTENT)
    assert frontmatter.dict == {}

    frontmatter = Frontmatter(FRONTMATTER_CONTENT)
    assert frontmatter.dict == {
        "frontmatter_Key1": ["frontmatter_Key1_value"],
        "frontmatter_Key2": ["article", "note"],
        "shared_key1": ["shared_key1_value"],
        "tags": ["tag_1", "tag_2", "📅/tag_3"],
    }
    assert frontmatter.dict_original == {
        "frontmatter_Key1": ["frontmatter_Key1_value"],
        "frontmatter_Key2": ["article", "note"],
        "shared_key1": ["shared_key1_value"],
        "tags": ["tag_1", "tag_2", "📅/tag_3"],
    }


def test_frontmatter_contains() -> None:
    """Test frontmatter contains."""
    frontmatter = Frontmatter(FRONTMATTER_CONTENT)

    assert frontmatter.contains("frontmatter_Key1") is True
    assert frontmatter.contains("frontmatter_Key2", "article") is True
    assert frontmatter.contains("frontmatter_Key3") is False
    assert frontmatter.contains("frontmatter_Key2", "no value") is False

    assert frontmatter.contains(r"\d$", is_regex=True) is True
    assert frontmatter.contains(r"^\d", is_regex=True) is False
    assert frontmatter.contains("key", r"_\d", is_regex=True) is False
    assert frontmatter.contains("key", r"\w\d_", is_regex=True) is True


def test_frontmatter_rename() -> None:
    """Test frontmatter rename."""
    frontmatter = Frontmatter(FRONTMATTER_CONTENT)
    assert frontmatter.dict == {
        "frontmatter_Key1": ["frontmatter_Key1_value"],
        "frontmatter_Key2": ["article", "note"],
        "shared_key1": ["shared_key1_value"],
        "tags": ["tag_1", "tag_2", "📅/tag_3"],
    }

    assert frontmatter.rename("no key", "new key") is False
    assert frontmatter.rename("tags", "no tag", "new key") is False

    assert frontmatter.has_changes() is False
    assert frontmatter.rename("tags", "tag_2", "new tag") is True

    assert frontmatter.dict["tags"] == ["new tag", "tag_1", "📅/tag_3"]
    assert frontmatter.rename("tags", "old_tags") is True
    assert frontmatter.dict["old_tags"] == ["new tag", "tag_1", "📅/tag_3"]
    assert "tags" not in frontmatter.dict

    assert frontmatter.has_changes() is True


def test_frontmatter_delete() -> None:
    """Test Frontmatter delete method."""
    frontmatter = Frontmatter(FRONTMATTER_CONTENT)
    assert frontmatter.dict == {
        "frontmatter_Key1": ["frontmatter_Key1_value"],
        "frontmatter_Key2": ["article", "note"],
        "shared_key1": ["shared_key1_value"],
        "tags": ["tag_1", "tag_2", "📅/tag_3"],
    }

    assert frontmatter.delete("no key") is False
    assert frontmatter.delete("tags", "no value") is False
    assert frontmatter.delete(r"\d{3}") is False
    assert frontmatter.has_changes() is False
    assert frontmatter.delete("tags", "tag_2") is True
    assert frontmatter.dict["tags"] == ["tag_1", "📅/tag_3"]
    assert frontmatter.delete("tags") is True
    assert "tags" not in frontmatter.dict
    assert frontmatter.has_changes() is True
    assert frontmatter.delete("shared_key1", r"\w+") is True
    assert frontmatter.dict["shared_key1"] == []
    assert frontmatter.delete(r"\w.tter") is True
    assert frontmatter.dict == {"shared_key1": []}


def test_frontmatter_yaml_conversion():
    """Test Frontmatter to_yaml method."""
    new_frontmatter: str = """\
tags:
  - tag_1
  - tag_2
  - 📅/tag_3
frontmatter_Key1: frontmatter_Key1_value
frontmatter_Key2:
  - article
  - note
shared_key1: shared_key1_value
"""
    new_frontmatter_sorted: str = """\
frontmatter_Key1: frontmatter_Key1_value
frontmatter_Key2:
  - article
  - note
shared_key1: shared_key1_value
tags:
  - tag_1
  - tag_2
  - 📅/tag_3
"""
    frontmatter = Frontmatter(FRONTMATTER_CONTENT)
    assert frontmatter.to_yaml() == new_frontmatter
    assert frontmatter.to_yaml(sort_keys=True) == new_frontmatter_sorted
def test_inline_metadata_create() -> None:
    """Test inline metadata creation."""
    inline = InlineMetadata(FRONTMATTER_CONTENT)
    assert inline.dict == {}
    inline = InlineMetadata(INLINE_CONTENT)
    assert inline.dict == {
        "bold_key1": ["bold_key1_value"],
        "bold_key2": ["bold_key2_value"],
        "emoji_📅_key": ["emoji_📅_key_value"],
        "in_text_key1": ["in_text_key1_value"],
        "in_text_key2": ["in_text_key2_value"],
        "link_key": ["link_key_value"],
        "repeated_key": ["repeated_key_value1", "repeated_key_value2"],
        "tag_key": ["tag_key_value"],
    }
    assert inline.dict_original == {
        "bold_key1": ["bold_key1_value"],
        "bold_key2": ["bold_key2_value"],
        "emoji_📅_key": ["emoji_📅_key_value"],
        "in_text_key1": ["in_text_key1_value"],
        "in_text_key2": ["in_text_key2_value"],
        "link_key": ["link_key_value"],
        "repeated_key": ["repeated_key_value1", "repeated_key_value2"],
        "tag_key": ["tag_key_value"],
    }


def test_inline_contains() -> None:
    """Test inline metadata contains method."""
    inline = InlineMetadata(INLINE_CONTENT)

    assert inline.contains("bold_key1") is True
    assert inline.contains("bold_key2", "bold_key2_value") is True
    assert inline.contains("bold_key3") is False
    assert inline.contains("bold_key2", "no value") is False

    assert inline.contains(r"\w{4}_key", is_regex=True) is True
    assert inline.contains(r"^\d", is_regex=True) is False
    assert inline.contains("1$", r"\d_value", is_regex=True) is True
    assert inline.contains("key", r"^\d_value", is_regex=True) is False


def test_inline_metadata_rename() -> None:
    """Test inline metadata rename."""
    inline = InlineMetadata(INLINE_CONTENT)
    assert inline.dict == {
        "bold_key1": ["bold_key1_value"],
        "bold_key2": ["bold_key2_value"],
        "emoji_📅_key": ["emoji_📅_key_value"],
        "in_text_key1": ["in_text_key1_value"],
        "in_text_key2": ["in_text_key2_value"],
        "link_key": ["link_key_value"],
        "repeated_key": ["repeated_key_value1", "repeated_key_value2"],
        "tag_key": ["tag_key_value"],
    }

    assert inline.rename("no key", "new key") is False
    assert inline.rename("repeated_key", "no value", "new key") is False
    assert inline.has_changes() is False
    assert inline.rename("repeated_key", "repeated_key_value1", "new value") is True
    assert inline.dict["repeated_key"] == ["new value", "repeated_key_value2"]
    assert inline.rename("repeated_key", "old_key") is True
    assert inline.dict["old_key"] == ["new value", "repeated_key_value2"]
    assert "repeated_key" not in inline.dict
    assert inline.has_changes() is True


def test_inline_metadata_delete() -> None:
    """Test inline metadata delete."""
    inline = InlineMetadata(INLINE_CONTENT)
    assert inline.dict == {
        "bold_key1": ["bold_key1_value"],
        "bold_key2": ["bold_key2_value"],
        "emoji_📅_key": ["emoji_📅_key_value"],
        "in_text_key1": ["in_text_key1_value"],
        "in_text_key2": ["in_text_key2_value"],
        "link_key": ["link_key_value"],
        "repeated_key": ["repeated_key_value1", "repeated_key_value2"],
        "tag_key": ["tag_key_value"],
    }

    assert inline.delete("no key") is False
    assert inline.delete("repeated_key", "no value") is False
    assert inline.has_changes() is False
    assert inline.delete("repeated_key", "repeated_key_value1") is True
    assert inline.dict["repeated_key"] == ["repeated_key_value2"]
    assert inline.delete("repeated_key") is True
    assert "repeated_key" not in inline.dict
    assert inline.has_changes() is True
    assert inline.delete(r"\d{3}") is False
    assert inline.delete(r"bold_key\d") is True
    assert inline.dict == {
        "emoji_📅_key": ["emoji_📅_key_value"],
        "in_text_key1": ["in_text_key1_value"],
        "in_text_key2": ["in_text_key2_value"],
        "link_key": ["link_key_value"],
        "tag_key": ["tag_key_value"],
    }
    assert inline.delete("emoji_📅_key", ".*📅.*") is True
    assert inline.dict == {
        "emoji_📅_key": [],
        "in_text_key1": ["in_text_key1_value"],
        "in_text_key2": ["in_text_key2_value"],
        "link_key": ["link_key_value"],
        "tag_key": ["tag_key_value"],
    }
def test_inline_tags_create() -> None:
    """Test inline tags creation."""
    tags = InlineTags(FRONTMATTER_CONTENT)
    tags.metadata_key
    assert tags.list == []

    tags = InlineTags(INLINE_CONTENT)
    assert tags.list == [
        "bold_tag",
        "in_text_tag",
        "inline_tag_top1",
        "inline_tag_top2",
        "tag_key_value",
    ]
    assert tags.list_original == [
        "bold_tag",
        "in_text_tag",
        "inline_tag_top1",
        "inline_tag_top2",
        "tag_key_value",
    ]


def test_inline_tags_contains() -> None:
    """Test inline tags contains."""
    tags = InlineTags(INLINE_CONTENT)
    assert tags.contains("bold_tag") is True
    assert tags.contains("no tag") is False

    assert tags.contains(r"\w_\w", is_regex=True) is True
    assert tags.contains(r"\d_\d", is_regex=True) is False


def test_inline_tags_rename() -> None:
    """Test inline tags rename."""
    tags = InlineTags(INLINE_CONTENT)
    assert tags.list == [
        "bold_tag",
        "in_text_tag",
        "inline_tag_top1",
        "inline_tag_top2",
        "tag_key_value",
    ]

    assert tags.rename("no tag", "new tag") is False
    assert tags.has_changes() is False
    assert tags.rename("bold_tag", "new tag") is True
    assert tags.list == [
        "in_text_tag",
        "inline_tag_top1",
        "inline_tag_top2",
        "new tag",
        "tag_key_value",
    ]
    assert tags.has_changes() is True


def test_inline_tags_delete() -> None:
    """Test inline tags delete."""
    tags = InlineTags(INLINE_CONTENT)
    assert tags.list == [
        "bold_tag",
        "in_text_tag",
        "inline_tag_top1",
        "inline_tag_top2",
        "tag_key_value",
    ]

    assert tags.delete("no tag") is False
    assert tags.has_changes() is False
    assert tags.delete("bold_tag") is True
    assert tags.list == [
        "in_text_tag",
        "inline_tag_top1",
        "inline_tag_top2",
        "tag_key_value",
    ]
    assert tags.has_changes() is True
    assert tags.delete(r"\d{3}") is False
    assert tags.delete(r"inline_tag_top\d") is True
    assert tags.list == ["in_text_tag", "tag_key_value"]
    GIVEN creating an object
    WHEN key_open and key_close are set after init
    THEN confirm they are set correctly
    """
    obj = InlineField(
        meta_type=MetadataType.INLINE,
        key="_key_",
        value="value",
        is_changed=False,
    )
    assert obj.key_open == "_"
    assert obj.key_close == "_"
    obj.key_open = "**"
    obj.key_close = "**"
    assert obj.key_open == "**"
    assert obj.key_close == "**"
236  tests/notes/note_init_test.py  Normal file
@@ -0,0 +1,236 @@
# type: ignore
"""Test notes.py."""

from pathlib import Path

import pytest
import typer

from obsidian_metadata.models.enums import MetadataType
from obsidian_metadata.models.exceptions import FrontmatterError
from obsidian_metadata.models.metadata import InlineField
from obsidian_metadata.models.notes import Note


def test_note_not_exists() -> None:
    """Test target not found.

    GIVEN a path to a non-existent file
    WHEN a Note object is created pointing to that file
    THEN a typer.Exit exception is raised
    """
    with pytest.raises(typer.Exit):
        Note(note_path="nonexistent_file.md")


def test_create_note_1(sample_note):
    """Test creating a note object.

    GIVEN a path to a markdown file
    WHEN a Note object is created pointing to that file
    THEN the Note object is created
    """
    note = Note(note_path=sample_note, dry_run=True)
    assert note.note_path == Path(sample_note)
    assert note.dry_run is True
    assert note.encoding == "utf_8"
    assert len(note.metadata) == 22

    with sample_note.open():
        content = sample_note.read_text()

    assert note.file_content == content
    assert note.original_file_content == content


def test_create_note_2(tmp_path) -> None:
    """Test creating a note object.

    GIVEN a text file with invalid frontmatter
    WHEN the note is initialized
    THEN a typer.Exit is raised
    """
    note_path = Path(tmp_path) / "broken_frontmatter.md"
    note_path.touch()
    note_path.write_text(
        """---
tags:
invalid = = "content"
---
"""
    )
    with pytest.raises(typer.Exit):
        Note(note_path=note_path)


def test_create_note_3(tmp_path) -> None:
    """Test creating a note object.

    GIVEN a text file with invalid frontmatter
    WHEN the note is initialized
    THEN a typer.Exit is raised
    """
    note_path = Path(tmp_path) / "broken_frontmatter.md"
    note_path.touch()
    note_path.write_text(
        """---
nested1:
  nested2: "content"
  nested3:
    - "content"
    - "content"
---
"""
    )
    with pytest.raises(typer.Exit):
        Note(note_path=note_path)


def test_create_note_6(tmp_path):
    """Test creating a note object.

    GIVEN a text file
    WHEN there is no content in the file
    THEN a note is returned with no metadata or content
    """
    note_path = Path(tmp_path) / "empty_file.md"
    note_path.touch()
    note = Note(note_path=note_path)
    assert note.note_path == note_path
    assert not note.file_content
    assert not note.original_file_content
    assert note.metadata == []


def test__grab_metadata_1(tmp_path):
    """Test the _grab_metadata method.

    GIVEN a text file
    WHEN there is frontmatter
    THEN the frontmatter is returned in the metadata list
    """
    note_path = Path(tmp_path) / "test_file.md"
    note_path.touch()
    note_path.write_text(
        """
---
key1: value1
key2: 2022-12-22
key3:
  - value3
  - value4
key4:
key5: "value5"
---
"""
    )
    note = Note(note_path=note_path)
    assert sorted(note.metadata, key=lambda x: (x.key, x.value)) == [
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key1", value="value1"),
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key2", value="2022-12-22"),
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key3", value="value3"),
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key3", value="value4"),
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key4", value="None"),
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key5", value="value5"),
    ]


def test__grab_metadata_2(tmp_path):
    """Test the _grab_metadata method.

    GIVEN a text file
    WHEN there is inline metadata
    THEN the inline metadata is returned in the metadata list
    """
    note_path = Path(tmp_path) / "test_file.md"
    note_path.touch()
    note_path.write_text(
        """

key1::value1
key2::2022-12-22
foo [key3::value3] bar
key4::value4
foo (key4::value) bar
key5::value5
key6:: `value6`
`key7::value7`
`key8`::`value8`

"""
    )
    note = Note(note_path=note_path)
    assert sorted(note.metadata, key=lambda x: (x.key, x.value)) == [
        InlineField(meta_type=MetadataType.INLINE, key="`key7", value="value7`"),
        InlineField(meta_type=MetadataType.INLINE, key="`key8`", value="`value8`"),
        InlineField(meta_type=MetadataType.INLINE, key="key1", value="value1"),
        InlineField(meta_type=MetadataType.INLINE, key="key2", value="2022-12-22"),
        InlineField(meta_type=MetadataType.INLINE, key="key3", value="value3"),
        InlineField(meta_type=MetadataType.INLINE, key="key4", value="value"),
        InlineField(meta_type=MetadataType.INLINE, key="key4", value="value4"),
        InlineField(meta_type=MetadataType.INLINE, key="key5", value="value5"),
        InlineField(meta_type=MetadataType.INLINE, key="key6", value=" `value6`"),
    ]
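
The fixture above catalogs the inline-field shapes the parser has to cope with: bare key::value lines, bracket- and paren-wrapped pairs, and backtick edge cases. A rough sketch of a matcher for the bare and wrapped forms (an editorial illustration, not the project's actual pattern):

```python
# Illustrative matcher for Obsidian-style inline fields; a simplification,
# not the project's actual regex.
import re

INLINE_FIELD = re.compile(
    r"(?:\[(?P<bkey>[^:\]]+)::[ ]?(?P<bval>[^\]]*)\])"   # [key:: value]
    r"|(?:\((?P<pkey>[^:)]+)::[ ]?(?P<pval>[^)]*)\))"    # (key:: value)
    r"|^(?P<key>[^:\n]+)::[ ]?(?P<val>.*)$"              # key:: value on its own line
)

line = "foo [key3::value3] bar"
match = INLINE_FIELD.search(line)
assert (match["bkey"], match["bval"]) == ("key3", "value3")
```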
def test__grab_metadata_3(tmp_path):
    """Test the _grab_metadata method.

    GIVEN a text file
    WHEN there are tags
    THEN the tags are returned in the metadata list
    """
    note_path = Path(tmp_path) / "test_file.md"
    note_path.touch()
    note_path.write_text("#tag1\n#tag2")
    note = Note(note_path=note_path)
    assert sorted(note.metadata, key=lambda x: x.value) == [
        InlineField(meta_type=MetadataType.TAGS, key=None, value="tag1"),
        InlineField(meta_type=MetadataType.TAGS, key=None, value="tag2"),
    ]


def test__grab_metadata_4(tmp_path):
    """Test the _grab_metadata method.

    GIVEN a text file
    WHEN there are tags, frontmatter, and inline metadata
    THEN all metadata is returned
    """
    note_path = Path(tmp_path) / "test_file.md"
    note_path.touch()
    note_path.write_text(
        """\
---
key1: value1
---
key2::value2
#tag1\n#tag2"""
    )
    note = Note(note_path=note_path)
    assert sorted(note.metadata, key=lambda x: x.value) == [
        InlineField(meta_type=MetadataType.TAGS, key=None, value="tag1"),
        InlineField(meta_type=MetadataType.TAGS, key=None, value="tag2"),
        InlineField(meta_type=MetadataType.FRONTMATTER, key="key1", value="value1"),
        InlineField(meta_type=MetadataType.INLINE, key="key2", value="value2"),
    ]


def test__grab_metadata_5(tmp_path):
    """Test the _grab_metadata method.

    GIVEN a text file
    WHEN invalid metadata is present
    THEN raise a FrontmatterError
    """
    note_path = Path(tmp_path) / "broken_frontmatter.md"
    note_path.touch()
    note_path.write_text(
        """---
tags:
invalid = = "content"
---
"""
    )
    with pytest.raises(typer.Exit):
        Note(note_path=note_path)
1136  tests/notes/note_methods_test.py  Normal file
File diff suppressed because it is too large
@@ -1,358 +0,0 @@
# type: ignore
"""Test notes.py."""

import re
from pathlib import Path

import pytest
import typer

from obsidian_metadata.models.notes import Note
from tests.helpers import Regex


def test_note_not_exists() -> None:
    """Test target not found."""
    with pytest.raises(typer.Exit):
        note = Note(note_path="nonexistent_file.md")

    assert note.note_path == "tests/test_data/test_note.md"
    assert note.file_content == "This is a test note."
    assert note.frontmatter == {}
    assert note.inline_tags == []
    assert note.inline_metadata == {}
    assert note.dry_run is False


def test_note_create(sample_note) -> None:
    """Test creating note class."""
    note = Note(note_path=sample_note, dry_run=True)
    assert note.note_path == Path(sample_note)

    assert note.dry_run is True
    assert "Lorem ipsum dolor" in note.file_content
    assert note.frontmatter.dict == {
        "date_created": ["2022-12-22"],
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "note"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value1"],
        "tags": [
            "frontmatter_tag1",
            "frontmatter_tag2",
            "shared_tag",
            "📅/frontmatter_tag3",
        ],
    }

    assert note.inline_tags.list == [
        "inline_tag_bottom1",
        "inline_tag_bottom2",
        "inline_tag_top1",
        "inline_tag_top2",
        "intext_tag1",
        "intext_tag2",
        "shared_tag",
    ]
    assert note.inline_metadata.dict == {
        "bottom_key1": ["bottom_key1_value"],
        "bottom_key2": ["bottom_key2_value"],
        "emoji_📅_key": ["emoji_📅_key_value"],
        "intext_key": ["intext_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value2"],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value_as_link"],
    }

    with sample_note.open():
        content = sample_note.read_text()

    assert note.file_content == content
    assert note.original_file_content == content
def test_append(sample_note) -> None:
    """Test appending to note."""
    note = Note(note_path=sample_note)
    assert note.dry_run is False

    string = "This is a test string."
    string2 = "Lorem ipsum dolor sit"

    note.append(string_to_append=string)
    assert string in note.file_content
    assert len(re.findall(re.escape(string), note.file_content)) == 1

    note.append(string_to_append=string)
    assert string in note.file_content
    assert len(re.findall(re.escape(string), note.file_content)) == 1

    note.append(string_to_append=string, allow_multiple=True)
    assert string in note.file_content
    assert len(re.findall(re.escape(string), note.file_content)) == 2

    note.append(string_to_append=string2)
    assert string2 in note.file_content
    assert len(re.findall(re.escape(string2), note.file_content)) == 1

    note.append(string_to_append=string2, allow_multiple=True)
    assert string2 in note.file_content
    assert len(re.findall(re.escape(string2), note.file_content)) == 2


def test_contains_inline_tag(sample_note) -> None:
    """Test contains inline tag."""
    note = Note(note_path=sample_note)
    assert note.contains_inline_tag("intext_tag1") is True
    assert note.contains_inline_tag("nonexistent_tag") is False
    assert note.contains_inline_tag(r"\d$", is_regex=True) is True
    assert note.contains_inline_tag(r"^\d", is_regex=True) is False


def test_contains_metadata(sample_note) -> None:
    """Test contains metadata."""
    note = Note(note_path=sample_note)

    assert note.contains_metadata("no key") is False
    assert note.contains_metadata("frontmatter_Key2") is True
    assert note.contains_metadata(r"^\d", is_regex=True) is False
    assert note.contains_metadata(r"^[\w_]+\d", is_regex=True) is True
    assert note.contains_metadata("frontmatter_Key2", "no value") is False
    assert note.contains_metadata("frontmatter_Key2", "article") is True
    assert note.contains_metadata("bottom_key1", "bottom_key1_value") is True
    assert note.contains_metadata(r"bottom_key\d$", r"bottom_key\d_value", is_regex=True) is True


def test_delete_inline_metadata(sample_note) -> None:
    """Test deleting inline metadata."""
    note = Note(note_path=sample_note)

    note._delete_inline_metadata("nonexistent_key")
    assert note.file_content == note.original_file_content
    note._delete_inline_metadata("frontmatter_Key1")
    assert note.file_content == note.original_file_content

    note._delete_inline_metadata("intext_key")
    assert note.file_content == Regex(r"dolore eu fugiat", re.DOTALL)

    note._delete_inline_metadata("bottom_key2", "bottom_key2_value")
    assert note.file_content != Regex(r"bottom_key2_value")
    assert note.file_content == Regex(r"bottom_key2::")
    note._delete_inline_metadata("bottom_key1")
    assert note.file_content != Regex(r"bottom_key1::")


def test_delete_inline_tag(sample_note) -> None:
    """Test deleting inline tags."""
    note = Note(note_path=sample_note)

    assert note.delete_inline_tag("not_a_tag") is False
    assert note.delete_inline_tag("intext_tag[1]") is True
    assert "intext_tag1" not in note.inline_tags.list
    assert note.file_content == Regex("consequat. Duis")


def test_delete_metadata(sample_note) -> Note:
    """Test deleting metadata."""
    note = Note(note_path=sample_note)

    assert note.delete_metadata("nonexistent_key") is False
    assert note.delete_metadata("frontmatter_Key1", "no value") is False
    assert note.delete_metadata("frontmatter_Key1") is True
    assert "frontmatter_Key1" not in note.frontmatter.dict

    assert note.delete_metadata("frontmatter_Key2", "article") is True
    assert note.frontmatter.dict["frontmatter_Key2"] == ["note"]

    assert note.delete_metadata("bottom_key1", "bottom_key1_value") is True
    assert note.inline_metadata.dict["bottom_key1"] == []
    assert note.file_content == Regex(r"bottom_key1::\n")

    assert note.delete_metadata("bottom_key2") is True
    assert "bottom_key2" not in note.inline_metadata.dict
    assert note.file_content != Regex(r"bottom_key2")


def test_has_changes(sample_note) -> None:
    """Test has changes."""
    note = Note(note_path=sample_note)

    assert note.has_changes() is False
    note.append("This is a test string.")
    assert note.has_changes() is True

    note = Note(note_path=sample_note)
    assert note.has_changes() is False
    note.delete_metadata("frontmatter_Key1")
    assert note.has_changes() is True

    note = Note(note_path=sample_note)
    assert note.has_changes() is False
    note.delete_metadata("bottom_key2")
    assert note.has_changes() is True

    note = Note(note_path=sample_note)
    assert note.has_changes() is False
    note.delete_inline_tag("intext_tag1")
    assert note.has_changes() is True
def test_print_note(sample_note, capsys) -> None:
|
||||
"""Test printing note."""
|
||||
note = Note(note_path=sample_note)
|
||||
note.print_note()
|
||||
captured = capsys.readouterr()
|
||||
assert "```python" in captured.out
|
||||
assert "---" in captured.out
|
||||
assert "#shared_tag" in captured.out
|
||||
|
||||
|
||||
def test_print_diff(sample_note, capsys) -> None:
|
||||
"""Test printing diff."""
|
||||
note = Note(note_path=sample_note)
|
||||
note.print_diff()
|
||||
captured = capsys.readouterr()
|
||||
assert captured.out == ""
|
||||
|
||||
note.append("This is a test string.")
|
||||
note.print_diff()
|
||||
captured = capsys.readouterr()
|
||||
assert "+ This is a test string." in captured.out
|
||||
|
||||
note.sub("The quick brown fox", "The quick brown hedgehog")
|
||||
note.print_diff()
|
||||
captured = capsys.readouterr()
|
||||
assert "- The quick brown fox" in captured.out
|
||||
assert "+ The quick brown hedgehog" in captured.out
|
||||
|
||||
|
||||
def test_sub(sample_note) -> None:
|
||||
"""Test substituting text in a note."""
|
||||
note = Note(note_path=sample_note)
|
||||
note.sub("#shared_tag", "#unshared_tags", is_regex=True)
|
||||
assert note.file_content != Regex(r"#shared_tag")
|
||||
assert note.file_content == Regex(r"#unshared_tags")
|
||||
|
||||
note.sub(" ut ", "")
|
||||
assert note.file_content != Regex(r" ut ")
|
||||
assert note.file_content == Regex(r"laboriosam, nisialiquid ex ea")
|
||||
|
||||
|
||||
def test_rename_inline_tag(sample_note) -> None:
|
||||
"""Test renaming an inline tag."""
|
||||
note = Note(note_path=sample_note)
|
||||
|
||||
assert note.rename_inline_tag("no_note_tag", "intext_tag2") is False
|
||||
assert note.rename_inline_tag("intext_tag1", "intext_tag26") is True
|
||||
assert note.inline_tags.list == [
|
||||
"inline_tag_bottom1",
|
||||
"inline_tag_bottom2",
|
||||
"inline_tag_top1",
|
||||
"inline_tag_top2",
|
||||
"intext_tag2",
|
||||
"intext_tag26",
|
||||
"shared_tag",
|
||||
]
|
||||
assert note.file_content == Regex(r"#intext_tag26")
|
||||
assert note.file_content != Regex(r"#intext_tag1")
|
||||
|
||||
|
||||
def test_rename_inline_metadata(sample_note) -> None:
|
||||
"""Test renaming inline metadata."""
|
||||
note = Note(note_path=sample_note)
|
||||
|
||||
note._rename_inline_metadata("nonexistent_key", "new_key")
|
||||
assert note.file_content == note.original_file_content
|
||||
note._rename_inline_metadata("bottom_key1", "no_value", "new_value")
|
||||
assert note.file_content == note.original_file_content
|
||||
|
||||
note._rename_inline_metadata("bottom_key1", "new_key")
|
||||
assert note.file_content != Regex(r"bottom_key1::")
|
||||
assert note.file_content == Regex(r"new_key::")
|
||||
|
||||
note._rename_inline_metadata("emoji_📅_key", "emoji_📅_key_value", "new_value")
|
||||
assert note.file_content != Regex(r"emoji_📅_key:: ?emoji_📅_key_value")
|
||||
assert note.file_content == Regex(r"emoji_📅_key:: ?new_value")
|
||||
|
||||
|
||||
def test_rename_metadata(sample_note) -> None:
|
||||
"""Test renaming metadata."""
|
||||
note = Note(note_path=sample_note)
|
||||
|
||||
assert note.rename_metadata("nonexistent_key", "new_key") is False
|
||||
assert note.rename_metadata("frontmatter_Key1", "nonexistent_value", "article") is False
|
||||
|
||||
assert note.rename_metadata("frontmatter_Key1", "new_key") is True
|
||||
assert "frontmatter_Key1" not in note.frontmatter.dict
|
||||
assert "new_key" in note.frontmatter.dict
|
||||
assert note.frontmatter.dict["new_key"] == ["author name"]
|
||||
assert note.file_content == Regex(r"new_key: author name")
|
||||
|
||||
assert note.rename_metadata("frontmatter_Key2", "article", "new_key") is True
|
||||
assert note.frontmatter.dict["frontmatter_Key2"] == ["new_key", "note"]
|
||||
assert note.file_content == Regex(r" - new_key")
|
||||
assert note.file_content != Regex(r" - article")
|
||||
|
||||
assert note.rename_metadata("bottom_key1", "new_key") is True
|
||||
assert "bottom_key1" not in note.inline_metadata.dict
|
||||
assert "new_key" in note.inline_metadata.dict
|
||||
assert note.file_content == Regex(r"new_key:: bottom_key1_value")
|
||||
|
||||
assert note.rename_metadata("new_key", "bottom_key1_value", "new_value") is True
|
||||
assert note.inline_metadata.dict["new_key"] == ["new_value"]
|
||||
assert note.file_content == Regex(r"new_key:: new_value")
|
||||
|
||||
|
||||
def test_replace_frontmatter(sample_note) -> None:
|
||||
"""Test replacing frontmatter."""
|
||||
note = Note(note_path=sample_note)
|
||||
|
||||
note.rename_metadata("frontmatter_Key1", "author name", "some_new_key_here")
|
||||
note.replace_frontmatter()
|
||||
new_frontmatter = """---
|
||||
date_created: '2022-12-22'
|
||||
tags:
|
||||
- frontmatter_tag1
|
||||
- frontmatter_tag2
|
||||
- shared_tag
|
||||
- 📅/frontmatter_tag3
|
||||
frontmatter_Key1: some_new_key_here
|
||||
frontmatter_Key2:
|
||||
- article
|
||||
- note
|
||||
shared_key1: shared_key1_value
|
||||
shared_key2: shared_key2_value1
|
||||
---"""
|
||||
assert new_frontmatter in note.file_content
|
||||
assert "# Heading 1" in note.file_content
|
||||
assert "```python" in note.file_content
|
||||
|
||||
note2 = Note(note_path="tests/fixtures/test_vault/no_metadata.md")
|
||||
note2.replace_frontmatter()
|
||||
note2.frontmatter.dict = {"key1": "value1", "key2": "value2"}
|
||||
note2.replace_frontmatter()
|
||||
new_frontmatter = """---
|
||||
key1: value1
|
||||
key2: value2
|
||||
---"""
|
||||
assert new_frontmatter in note2.file_content
|
||||
assert "Lorem ipsum dolor sit amet" in note2.file_content
|
||||
|
||||
|
||||
def test_write(sample_note, tmp_path) -> None:
|
||||
"""Test writing note to file."""
|
||||
note = Note(note_path=sample_note)
|
||||
note.sub(pattern="Heading 1", replacement="Heading 2")
|
||||
|
||||
note.write()
|
||||
note = Note(note_path=sample_note)
|
||||
assert "Heading 2" in note.file_content
|
||||
assert "Heading 1" not in note.file_content
|
||||
|
||||
new_path = Path(tmp_path / "new_note.md")
|
||||
note.write(new_path)
|
||||
note2 = Note(note_path=new_path)
|
||||
assert "Heading 2" in note2.file_content
|
||||
assert "Heading 1" not in note2.file_content
|
||||
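
The `Regex` comparator these assertions lean on is defined in tests/helpers.py, which is outside this diff. A minimal sketch of such a helper, assuming `string == Regex(pattern)` should pass whenever the pattern is found anywhere in the string:

import re


class Regex:
    """Compare equal to any string the pattern is found in."""

    def __init__(self, pattern: str, flags: int = 0) -> None:
        self._regex = re.compile(pattern, flags)

    def __eq__(self, other: object) -> bool:
        # str.__eq__ returns NotImplemented for a Regex operand, so Python
        # falls back to this method for `some_string == Regex(...)`.
        return isinstance(other, str) and self._regex.search(other) is not None

With that `__eq__`, the `!=` form comes for free, which is how the `note.file_content != Regex(...)` assertions above read.
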
tests/parsers_test.py (new file, 379 lines)
@@ -0,0 +1,379 @@
# type: ignore
"""Test the parsers module."""

import re

import pytest

from obsidian_metadata.models.enums import Wrapping
from obsidian_metadata.models.parsers import Parser

P = Parser()


def test_identify_internal_link_1():
    """Test the internal_link attribute.

    GIVEN a string with an external link
    WHEN the internal_link attribute is called within a regex
    THEN the external link is not found
    """
    assert re.findall(P.internal_link, "[link](https://example.com/somepage.html)") == []


def test_identify_internal_link_2():
    """Test the internal_link attribute.

    GIVEN a string without any links
    WHEN the internal_link attribute is called within a regex
    THEN no links are found
    """
    assert re.findall(P.internal_link, "foo bar baz") == []


def test_identify_internal_link_3():
    """Test the internal_link attribute.

    GIVEN a string with an internal link
    WHEN the internal_link attribute is called within a regex
    THEN the internal link is found
    """
    assert re.findall(P.internal_link, "[[internal_link]]") == ["[[internal_link]]"]
    assert re.findall(P.internal_link, "[[internal_link|text]]") == ["[[internal_link|text]]"]
    assert re.findall(P.internal_link, "[[test/Main.md]]") == ["[[test/Main.md]]"]
    assert re.findall(P.internal_link, "[[%Man &Machine + Mind%]]") == ["[[%Man &Machine + Mind%]]"]
    assert re.findall(P.internal_link, "[[Hello \\| There]]") == ["[[Hello \\| There]]"]
    assert re.findall(P.internal_link, "[[\\||Yes]]") == ["[[\\||Yes]]"]
    assert re.findall(P.internal_link, "[[test/Main|Yes]]") == ["[[test/Main|Yes]]"]
    assert re.findall(P.internal_link, "[[2020#^14df]]") == ["[[2020#^14df]]"]
    assert re.findall(P.internal_link, "!foo[[bar]]baz") == ["[[bar]]"]
    assert re.findall(P.internal_link, "[[]]") == ["[[]]"]
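
The `P.internal_link` pattern itself is defined in obsidian_metadata/models/parsers.py and does not appear in this diff. A minimal pattern consistent with the assertions above (an illustration, not the project's actual regex) matches the shortest non-nested `[[...]]` span:

import re

internal_link = re.compile(r"\[\[[^\[\]]*?\]\]")  # shortest bracket-free [[...]] span

assert re.findall(internal_link, "!foo[[bar]]baz") == ["[[bar]]"]
assert re.findall(internal_link, "[link](https://example.com)") == []
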
def test_return_frontmatter_1():
    """Test the return_frontmatter method.

    GIVEN a string with frontmatter
    WHEN the return_frontmatter method is called
    THEN the frontmatter is returned
    """
    content = """
---
key: value
---
# Hello World
"""
    assert P.return_frontmatter(content) == "---\nkey: value\n---"


def test_return_frontmatter_2():
    """Test the return_frontmatter method.

    GIVEN a string without frontmatter
    WHEN the return_frontmatter method is called
    THEN None is returned
    """
    content = """
# Hello World
---
key: value
---
"""
    assert P.return_frontmatter(content) is None


def test_return_frontmatter_3():
    """Test the return_frontmatter method.

    GIVEN a string with frontmatter
    WHEN the return_frontmatter method is called with data_only=True
    THEN the frontmatter data is returned
    """
    content = """
---
key: value
key2: value2
---
# Hello World
"""
    assert P.return_frontmatter(content, data_only=True) == "key: value\nkey2: value2"


def test_return_frontmatter_4():
    """Test the return_frontmatter method.

    GIVEN a string without frontmatter
    WHEN the return_frontmatter method is called with data_only=True
    THEN None is returned
    """
    content = """
# Hello World
---
key: value
---
"""
    assert P.return_frontmatter(content, data_only=True) is None
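
Together the four cases pin down the contract: frontmatter only counts when `---` opens the first non-blank line. A sketch that satisfies them (the shipped method may differ):

import re


def return_frontmatter(text: str, data_only: bool = False) -> str | None:
    # Anchor at the start of the string; leading blank lines are tolerated,
    # but any other content before the first `---` disqualifies the block.
    match = re.search(r"^\s*(?P<block>---\n(?P<data>.*?)\n---)", text, re.DOTALL)
    if match is None:
        return None
    return match.group("data") if data_only else match.group("block")
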
def test_return_inline_metadata_1():
    """Test the return_inline_metadata method.

    GIVEN a string with no inline metadata
    WHEN the return_inline_metadata method is called
    THEN return None
    """
    assert P.return_inline_metadata("foo bar baz") is None
    assert P.return_inline_metadata("foo:bar baz") is None
    assert P.return_inline_metadata("foo:::bar baz") is None
    assert P.return_inline_metadata("[foo:::bar] baz") is None


@pytest.mark.parametrize(
    ("string", "returned"),
    [
        ("[k1:: v1]", [("k1", " v1", Wrapping.BRACKETS)]),
        ("(k/1:: v/1)", [("k/1", " v/1", Wrapping.PARENS)]),
        (
            "[k1::v1] and (k2:: v2)",
            [("k1", "v1", Wrapping.BRACKETS), ("k2", " v2", Wrapping.PARENS)],
        ),
        ("(début::début)", [("début", "début", Wrapping.PARENS)]),
        ("[😉::🚀]", [("😉", "🚀", Wrapping.BRACKETS)]),
        (
            "(🛸rocket🚀ship:: a 🎅 [console] game)",
            [("🛸rocket🚀ship", " a 🎅 [console] game", Wrapping.PARENS)],
        ),
    ],
)
def test_return_inline_metadata_2(string, returned):
    """Test the return_inline_metadata method.

    GIVEN a string with inline metadata within a wrapping
    WHEN the return_inline_metadata method is called
    THEN return the wrapped inline metadata
    """
    assert P.return_inline_metadata(string) == returned


@pytest.mark.parametrize(
    ("string", "returned"),
    [
        ("k1::v1", [("k1", "v1", Wrapping.NONE)]),
        ("😉::🚀", [("😉", "🚀", Wrapping.NONE)]),
        ("k1:: w/ !@#$| ", [("k1", " w/ !@#$| ", Wrapping.NONE)]),
        ("クリスマス:: 家庭用ゲーム機", [("クリスマス", " 家庭用ゲーム機", Wrapping.NONE)]),
        ("Noël:: Un jeu de console", [("Noël", " Un jeu de console", Wrapping.NONE)]),
        ("🎅:: a console game", [("🎅", " a console game", Wrapping.NONE)]),
        ("🛸rocket🚀ship:: a 🎅 console game", [("🛸rocket🚀ship", " a 🎅 console game", Wrapping.NONE)]),
        (">flag::irish flag 🇮🇪", [("flag", "irish flag 🇮🇪", Wrapping.NONE)]),
        ("foo::[bar] baz", [("foo", "[bar] baz", Wrapping.NONE)]),
        ("foo::bar) baz", [("foo", "bar) baz", Wrapping.NONE)]),
        ("[foo::bar baz", [("foo", "bar baz", Wrapping.NONE)]),
        ("_foo_::bar baz", [("_foo_", "bar baz", Wrapping.NONE)]),
        ("**foo**::bar_baz", [("**foo**", "bar_baz", Wrapping.NONE)]),
        ("`foo`::`bar baz`", [("`foo`", "`bar baz`", Wrapping.NONE)]),
        ("`foo`:: `bar baz`", [("`foo`", " `bar baz`", Wrapping.NONE)]),
        ("`foo::bar baz`", [("`foo", "bar baz`", Wrapping.NONE)]),
        ("`foo:: bar baz`", [("`foo", " bar baz`", Wrapping.NONE)]),
        ("**URL**::`https://example.com`", [("**URL**", "`https://example.com`", Wrapping.NONE)]),
    ],
)
def test_return_inline_metadata_3(string, returned):
    """Test the return_inline_metadata method.

    GIVEN a string with inline metadata without a wrapping
    WHEN the return_inline_metadata method is called
    THEN return the inline metadata with Wrapping.NONE
    """
    assert P.return_inline_metadata(string) == returned


@pytest.mark.parametrize(
    ("string", "returned"),
    [
        ("#foo", ["#foo"]),
        ("#tag1 #tag2 #tag3", ["#tag1", "#tag2", "#tag3"]),
        ("#foo.bar", ["#foo"]),
        ("#foo-bar_baz#", ["#foo-bar_baz"]),
        ("#daily/2021/20/08", ["#daily/2021/20/08"]),
        ("#🌱/🌿", ["#🌱/🌿"]),
        ("#début", ["#début"]),
        ("#/some/🚀/tag", ["#/some/🚀/tag"]),
        (r"\\#foo", ["#foo"]),
        ("#f#oo", ["#f", "#oo"]),
        ("#foo#bar#baz", ["#foo", "#bar", "#baz"]),
    ],
)
def test_return_tags_1(string, returned):
    """Test the return_tags method.

    GIVEN a string with tags
    WHEN the return_tags method is called
    THEN the valid tags are returned
    """
    assert P.return_tags(string) == returned


@pytest.mark.parametrize(
    ("string"),
    [
        ("##foo# ##bar # baz ##"),
        ("##foo"),
        ("foo##bar"),
        ("#1123"),
        ("foo bar"),
        ("aa#foo"),
        ("$#foo"),
    ],
)
def test_return_tags_2(string):
    """Test the return_tags method.

    GIVEN a string without valid tags
    WHEN the return_tags method is called
    THEN an empty list is returned
    """
    assert P.return_tags(string) == []


def test_return_top_with_header_1():
    """Test the return_top_with_header method.

    GIVEN a string with frontmatter above a first markdown header
    WHEN return_top_with_header is called
    THEN return the content up to the end of the first header
    """
    content = """
---
key: value
---
# Hello World

foo bar baz
"""
    assert P.return_top_with_header(content) == "---\nkey: value\n---\n# Hello World\n"


def test_return_top_with_header_2():
    """Test the return_top_with_header method.

    GIVEN a string whose first markdown header is preceded only by blank lines
    WHEN return_top_with_header is called
    THEN return the content up to the end of the first header
    """
    content = "\n\n### Hello World\nfoo bar\nfoo bar"
    assert P.return_top_with_header(content) == "### Hello World\n"


def test_return_top_with_header_3():
    """Test the return_top_with_header method.

    GIVEN a string with no markdown headers
    WHEN return_top_with_header is called
    THEN return None
    """
    content = "Hello World\nfoo bar\nfoo bar"
    assert not P.return_top_with_header(content)


def test_return_top_with_header_4():
    """Test the return_top_with_header method.

    GIVEN a string with content above the first markdown header
    WHEN return_top_with_header is called
    THEN return the content up to the end of the first header
    """
    content = "qux bar baz\nbaz\nfoo\n### bar\n# baz foo bar"
    assert P.return_top_with_header(content) == "qux bar baz\nbaz\nfoo\n### bar\n"


def test_strip_frontmatter_1():
    """Test the strip_frontmatter method.

    GIVEN a string with frontmatter
    WHEN the strip_frontmatter method is called
    THEN the frontmatter is removed
    """
    content = """
---
key: value
---
# Hello World
"""
    assert P.strip_frontmatter(content).strip() == "# Hello World"


def test_strip_frontmatter_2():
    """Test the strip_frontmatter method.

    GIVEN a string without frontmatter
    WHEN the strip_frontmatter method is called
    THEN nothing is removed
    """
    content = """
# Hello World
---
key: value
---
"""
    assert P.strip_frontmatter(content) == content


def test_strip_frontmatter_3():
    """Test the strip_frontmatter method.

    GIVEN a string with frontmatter
    WHEN the strip_frontmatter method is called with data_only=True
    THEN the frontmatter data is removed but the separators remain
    """
    content = """
---
key: value
---
# Hello World
"""
    assert P.strip_frontmatter(content, data_only=True).strip() == "---\n---\n# Hello World"


def test_strip_frontmatter_4():
    """Test the strip_frontmatter method.

    GIVEN a string without frontmatter
    WHEN the strip_frontmatter method is called with data_only=True
    THEN nothing is removed
    """
    content = """
# Hello World
---
key: value
---
"""
    assert P.strip_frontmatter(content, data_only=True) == content


@pytest.mark.parametrize(
    ("content", "expected"),
    [
        ("Foo `bar` baz `Qux` ```bar\n```", "Foo baz ```bar\n```"),
        ("foo", "foo"),
        ("foo `bar` baz `qux`", "foo baz "),
        ("key:: `value`", "key:: "),
        ("foo\nbar\n`baz`", "foo\nbar\n"),
        ("foo\nbar::baz\n`qux`", "foo\nbar::baz\n"),
        ("`foo::bar`", ""),
    ],
)
def test_strip_inline_code_1(content, expected):
    """Test the strip_inline_code method.

    GIVEN a string with inline code
    WHEN the strip_inline_code method is called
    THEN the inline code is removed
    """
    assert P.strip_inline_code(content) == expected
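
The expected values above imply that single-backtick spans are removed along with at most one trailing space, while fenced blocks survive. A single substitution consistent with all seven cases (a sketch, not necessarily the shipped implementation):

import re


def strip_inline_code(text: str) -> str:
    # `[^`\n]+` cannot start or end inside a ``` fence because the adjacent
    # characters there are backticks, so fenced blocks are left alone.
    return re.sub(r"`[^`\n]+` ?", "", text)
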
def test_validators():
    """Test validators."""
    assert P.validate_tag_text.search("test_tag") is None
    assert P.validate_tag_text.search("#asdf").group(0) == "#"
@@ -1,112 +0,0 @@
# type: ignore
"""Tests for the regex module."""

import pytest

from obsidian_metadata.models.patterns import Patterns

TAG_CONTENT: str = "#1 #2 **#3** [[#4]] [[#5|test]] #6#notag #7_8 #9/10 #11-12 #13; #14, #15. #16: #17* #18(#19) #20[#21] #22\\ #23& #24# #25 **#26** #📅/tag"
INLINE_METADATA: str = """
**1:: 1**
2_2:: [[2_2]] | 2
asdfasdf [3:: 3] asdfasdf [7::7] asdf
[4:: 4] [5:: 5]
> 6:: 6
**8**:: **8**
10::
📅11:: 11/📅/11
emoji_📅_key:: 📅emoji_📅_key_value
"""
FRONTMATTER_CONTENT: str = """
---
tags:
- tag_1
- tag_2
-
- 📅/tag_3
frontmatter_Key1: "frontmatter_Key1_value"
frontmatter_Key2: ["note", "article"]
shared_key1: 'shared_key1_value'
---
more content

---
horizontal: rule
---
"""
CORRECT_FRONTMATTER_WITH_SEPARATORS: str = """---
tags:
- tag_1
- tag_2
-
- 📅/tag_3
frontmatter_Key1: "frontmatter_Key1_value"
frontmatter_Key2: ["note", "article"]
shared_key1: 'shared_key1_value'
---"""
CORRECT_FRONTMATTER_NO_SEPARATORS: str = """
tags:
- tag_1
- tag_2
-
- 📅/tag_3
frontmatter_Key1: "frontmatter_Key1_value"
frontmatter_Key2: ["note", "article"]
shared_key1: 'shared_key1_value'
"""


def test_regex():
    """Test regexes."""
    pattern = Patterns()

    assert pattern.find_inline_tags.findall(TAG_CONTENT) == [
        "1",
        "2",
        "3",
        "4",
        "5",
        "6",
        "7_8",
        "9/10",
        "11-12",
        "13",
        "14",
        "15",
        "16",
        "17",
        "18",
        "19",
        "20",
        "21",
        "22",
        "23",
        "24",
        "25",
        "26",
        "📅/tag",
    ]

    result = pattern.find_inline_metadata.findall(INLINE_METADATA)
    assert result == [
        ("", "", "1", "1**"),
        ("", "", "2_2", "[[2_2]] | 2"),
        ("3", "3", "", ""),
        ("7", "7", "", ""),
        ("", "", "4", "4] [5:: 5]"),
        ("", "", "8**", "**8**"),
        ("", "", "11", "11/📅/11"),
        ("", "", "emoji_📅_key", "📅emoji_📅_key_value"),
    ]

    found = pattern.frontmatt_block_with_separators.search(FRONTMATTER_CONTENT).group("frontmatter")
    assert found == CORRECT_FRONTMATTER_WITH_SEPARATORS

    found = pattern.frontmatt_block_no_separators.search(FRONTMATTER_CONTENT).group("frontmatter")
    assert found == CORRECT_FRONTMATTER_NO_SEPARATORS

    with pytest.raises(AttributeError):
        pattern.frontmatt_block_no_separators.search(TAG_CONTENT).group("frontmatter")

    assert pattern.validate_tag_text.search("test_tag") is None
    assert pattern.validate_tag_text.search("#asdf").group(0) == "#"
@@ -26,7 +26,6 @@ def test_validate_valid_regex() -> None:
    questions = Questions(vault=VAULT)
    assert questions._validate_valid_vault_regex(r".*\.md") is True
    assert "Invalid regex" in questions._validate_valid_vault_regex("[")
    assert "Regex cannot be empty" in questions._validate_valid_vault_regex("")
    assert "Regex does not match paths" in questions._validate_valid_vault_regex(r"\d\d\d\w\d")


@@ -35,7 +34,7 @@ def test_validate_key_exists() -> None:
    questions = Questions(vault=VAULT)
    assert "'test' does not exist" in questions._validate_key_exists("test")
    assert "Key cannot be empty" in questions._validate_key_exists("")
    assert questions._validate_key_exists("frontmatter_Key1") is True
    assert questions._validate_key_exists("frontmatter1") is True


def test_validate_new_key() -> None:
@@ -61,12 +60,20 @@ def test_validate_new_tag() -> None:
    assert questions._validate_new_tag("new_tag") is True


def test_validate_existing_inline_tag() -> None:
def test_validate_number() -> None:
    """Test number validation."""
    questions = Questions(vault=VAULT)
    assert "Must be an integer" in questions._validate_number("test")
    assert "Must be an integer" in questions._validate_number("1.1")
    assert questions._validate_number("1") is True
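
These questionary-style validators return `True` on success and an error string otherwise. A sketch of `_validate_number` consistent with the three assertions (assumed; the implementation is not part of this hunk):

def _validate_number(self, text: str) -> bool | str:
    """Return True for integers, otherwise an error message."""
    if not text.lstrip("+-").isdigit():  # rejects "", "test", and "1.1"
        return "Must be an integer"
    return True
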
def test_validate_existing_tag() -> None:
    """Test existing tag validation."""
    questions = Questions(vault=VAULT)
    assert "Tag cannot be empty" in questions._validate_existing_inline_tag("")
    assert "'test' does not exist" in questions._validate_existing_inline_tag("test")
    assert questions._validate_existing_inline_tag("shared_tag") is True
    assert "Tag cannot be empty" in questions._validate_existing_tag("")
    assert "'test' does not exist" in questions._validate_existing_tag("test")
    assert questions._validate_existing_tag("shared_tag") is True


def test_validate_key_exists_regex() -> None:
@@ -75,39 +82,34 @@ def test_validate_key_exists_regex() -> None:
    assert "'test' does not exist" in questions._validate_key_exists_regex("test")
    assert "Key cannot be empty" in questions._validate_key_exists_regex("")
    assert "Invalid regex" in questions._validate_key_exists_regex("[")
    assert questions._validate_key_exists_regex(r"\w+_Key\d") is True
    assert questions._validate_key_exists_regex(r"f\w+\d") is True


def test_validate_value() -> None:
    """Test value validation."""
    questions = Questions(vault=VAULT)
    assert questions._validate_value("test") is True
    assert "Value cannot be empty" in questions._validate_value("")

    questions2 = Questions(vault=VAULT, key="frontmatter_Key1")
    assert questions2._validate_value("test") == "frontmatter_Key1:test does not exist"
    assert "Value cannot be empty" in questions2._validate_value("")
    assert questions2._validate_value("author name") is True
    assert questions._validate_value("test") is True
    questions2 = Questions(vault=VAULT, key="frontmatter1")
    assert questions2._validate_value("test") == "frontmatter1:test does not exist"
    assert questions2._validate_value("foo") is True


def test_validate_value_exists_regex() -> None:
    """Test value exists regex validation."""
    questions2 = Questions(vault=VAULT, key="frontmatter_Key1")
    questions2 = Questions(vault=VAULT, key="frontmatter1")
    assert "Invalid regex" in questions2._validate_value_exists_regex("[")
    assert "Regex cannot be empty" in questions2._validate_value_exists_regex("")
    assert (
        questions2._validate_value_exists_regex(r"\d\d\d\w\d")
        == r"No values in frontmatter_Key1 match regex: \d\d\d\w\d"
        == r"No values in frontmatter1 match regex: \d\d\d\w\d"
    )
    assert questions2._validate_value_exists_regex(r"^author \w+") is True
    assert questions2._validate_value_exists_regex(r"^f\w{2}$") is True


def test_validate_new_value() -> None:
    """Test new value validation."""
    questions = Questions(vault=VAULT, key="frontmatter_Key1")
    assert questions._validate_new_value("new_value") is True
    questions = Questions(vault=VAULT, key="frontmatter1")
    assert questions._validate_new_value("not_exists") is True
    assert "Value cannot be empty" in questions._validate_new_value("")
    assert (
        questions._validate_new_value("author name")
        == "frontmatter_Key1:author name already exists"
    )
    assert questions._validate_new_value("foo") == "frontmatter1:foo already exists"
@@ -1,108 +1,535 @@
# type: ignore
"""Test the utilities module."""

import pytest
import typer

from obsidian_metadata._utils import (
    clean_dictionary,
    dict_contains,
    dict_values_to_lists_strings,
    remove_markdown_sections,
    dict_keys_to_lower,
    merge_dictionaries,
    rename_in_dict,
    validate_csv_bulk_imports,
)


def test_dict_contains() -> None:
    """Test dict_contains."""
    d = {"key1": ["value1", "value2"], "key2": ["value3", "value4"], "key3": ["value5", "value6"]}
def test_clean_dictionary_1():
    """Test clean_dictionary() function.

    assert dict_contains(d, "key1") is True
    assert dict_contains(d, "key5") is False
    assert dict_contains(d, "key1", "value1") is True
    assert dict_contains(d, "key1", "value5") is False
    assert dict_contains(d, "key[1-2]", is_regex=True) is True
    assert dict_contains(d, "^1", is_regex=True) is False
    assert dict_contains(d, r"key\d", r"value\d", is_regex=True) is True
    assert dict_contains(d, "key1$", "^alue", is_regex=True) is False
    assert dict_contains(d, r"key\d", "value5", is_regex=True) is True


def test_dict_values_to_lists_strings():
    """Test converting dictionary values to lists of strings."""
    dictionary = {
        "key1": "value1",
        "key2": ["value2", "value3", None],
        "key3": {"key4": "value4"},
        "key5": {"key6": {"key7": "value7"}},
        "key6": None,
        "key8": [1, 3, None, 4],
        "key9": [None, "", "None"],
        "key10": "None",
        "key11": "",
    }

    result = dict_values_to_lists_strings(dictionary)
    assert result == {
        "key1": ["value1"],
        "key10": ["None"],
        "key11": [""],
        "key2": ["None", "value2", "value3"],
        "key3": {"key4": ["value4"]},
        "key5": {"key6": {"key7": ["value7"]}},
        "key6": ["None"],
        "key8": ["1", "3", "4", "None"],
        "key9": ["", "None", "None"],
    }

    result = dict_values_to_lists_strings(dictionary, strip_null_values=True)
    assert result == {
        "key1": ["value1"],
        "key10": [],
        "key11": [],
        "key2": ["value2", "value3"],
        "key3": {"key4": ["value4"]},
        "key5": {"key6": {"key7": ["value7"]}},
        "key6": [],
        "key8": ["1", "3", "4"],
        "key9": ["", "None"],
    }


def test_remove_markdown_sections():
    """Test removing markdown sections."""
    text: str = """
---
key: value
---

Lorem ipsum `dolor sit` amet.

```bash
echo "Hello World"
```
---
dd
---
    GIVEN a dictionary passed to clean_dictionary()
    WHEN the dictionary is empty
    THEN return an empty dictionary
    """
    result = remove_markdown_sections(
        text,
        strip_codeblocks=True,
        strip_frontmatter=True,
        strip_inlinecode=True,
    assert clean_dictionary({}) == {}


def test_clean_dictionary_2():
    """Test clean_dictionary() function.

    GIVEN a dictionary passed to clean_dictionary()
    WHEN keys contain leading/trailing spaces
    THEN remove the spaces from the keys
    """
    assert clean_dictionary({" key 1 ": "value 1"}) == {"key 1": "value 1"}


def test_clean_dictionary_3():
    """Test clean_dictionary() function.

    GIVEN a dictionary passed to clean_dictionary()
    WHEN values contain leading/trailing spaces
    THEN remove the spaces from the values
    """
    assert clean_dictionary({"key 1": " value 1 "}) == {"key 1": "value 1"}


def test_clean_dictionary_4():
    """Test clean_dictionary() function.

    GIVEN a dictionary passed to clean_dictionary()
    WHEN keys or values contain leading/trailing asterisks
    THEN remove the asterisks from the keys or values
    """
    assert clean_dictionary({"**key_1**": ["**value 1**", "value 2"]}) == {
        "key_1": ["value 1", "value 2"]
    }


def test_clean_dictionary_5():
    """Test clean_dictionary() function.

    GIVEN a dictionary passed to clean_dictionary()
    WHEN keys or values contain leading/trailing brackets
    THEN remove the brackets from the keys and values
    """
    assert clean_dictionary({"[[key_1]]": ["[[value 1]]", "[value 2]"]}) == {
        "key_1": ["value 1", "value 2"]
    }


def test_clean_dictionary_6():
    """Test clean_dictionary() function.

    GIVEN a dictionary passed to clean_dictionary()
    WHEN keys or values contain leading/trailing hashtags
    THEN remove the hashtags from the keys and values
    """
    assert clean_dictionary({"#key_1": ["#value 1", "value 2#"]}) == {
        "key_1": ["value 1", "value 2"]
    }


def test_dict_contains_1():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN the dictionary is empty
    THEN the function should return False
    """
    assert dict_contains({}, "key1") is False


def test_dict_contains_2():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN the key is not in the dictionary
    THEN the function should return False
    """
    assert dict_contains({"key1": "value1"}, "key2") is False


def test_dict_contains_3():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN the key is in the dictionary
    THEN the function should return True
    """
    assert dict_contains({"key1": "value1"}, "key1") is True


def test_dict_contains_4():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN the key and value are in the dictionary
    THEN the function should return True
    """
    assert dict_contains({"key1": "value1"}, "key1", "value1") is True


def test_dict_contains_5():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN the key and value are not in the dictionary
    THEN the function should return False
    """
    assert dict_contains({"key1": "value1"}, "key1", "value2") is False


def test_dict_contains_6():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN a regex is used for the key and the key is in the dictionary
    THEN the function should return True
    """
    assert dict_contains({"key1": "value1"}, r"key\d", is_regex=True) is True


def test_dict_contains_7():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN a regex is used for the key and the key is not in the dictionary
    THEN the function should return False
    """
    assert dict_contains({"key1": "value1"}, r"key\d\d", is_regex=True) is False


def test_dict_contains_8():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN a regex is used for a value and the value is in the dictionary
    THEN the function should return True
    """
    assert dict_contains({"key1": "value1"}, "key1", r"\w+", is_regex=True) is True


def test_dict_contains_9():
    """Test dict_contains() function.

    GIVEN calling dict_contains() with a dictionary
    WHEN a regex is used for a value and the value is not in the dictionary
    THEN the function should return False
    """
    assert dict_contains({"key1": "value1"}, "key1", r"\d{2}", is_regex=True) is False
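
The nine cases above, plus the older list-valued test they replace, pin down `dict_contains`. A sketch that passes with both value shapes, scalar and list (an assumption, not the repository's exact code):

import re


def dict_contains(dictionary: dict, key: str, value: str | None = None, is_regex: bool = False) -> bool:
    """Check whether a key (and optionally a value) exists in the dictionary."""
    matches = (lambda p, s: re.search(p, s) is not None) if is_regex else (lambda p, s: p == s)

    for k, v in dictionary.items():
        if not matches(key, k):
            continue
        if value is None:
            return True
        values = v if isinstance(v, list) else [v]  # tolerate scalar values
        if any(matches(value, item) for item in values):
            return True
    return False
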
def test_dict_keys_to_lower() -> None:
    """Test the dict_keys_to_lower() function.

    GIVEN a dictionary with mixed case keys
    WHEN the dict_keys_to_lower() function is called
    THEN the dictionary keys should be converted to lowercase
    """
    test_dict = {"Key1": "Value1", "KEY2": "Value2", "key3": "Value3"}
    assert dict_keys_to_lower(test_dict) == {"key1": "Value1", "key2": "Value2", "key3": "Value3"}


def test_merge_dictionaries_1():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN a value in dict1 is not a list
    THEN raise a TypeError
    """
    test_dict_1 = {"key1": "value1", "key2": "value2"}
    test_dict_2 = {"key3": ["value3"], "key4": ["value4"]}

    with pytest.raises(TypeError, match=r"key.*is not a list"):
        merge_dictionaries(test_dict_1, test_dict_2)


def test_merge_dictionaries_2():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN a value in dict2 is not a list
    THEN raise a TypeError
    """
    test_dict_1 = {"key3": ["value3"], "key4": ["value4"]}
    test_dict_2 = {"key1": "value1", "key2": "value2"}

    with pytest.raises(TypeError, match=r"key.*is not a list"):
        merge_dictionaries(test_dict_1, test_dict_2)


def test_merge_dictionaries_3():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN keys and values in both dictionaries are unique
    THEN return a dictionary with the keys and values from both dictionaries
    """
    test_dict_1 = {"key1": ["value1"], "key2": ["value2"]}
    test_dict_2 = {"key3": ["value3"], "key4": ["value4"]}

    assert merge_dictionaries(test_dict_1, test_dict_2) == {
        "key1": ["value1"],
        "key2": ["value2"],
        "key3": ["value3"],
        "key4": ["value4"],
    }


def test_merge_dictionaries_4():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN keys in both dictionaries are not unique
    THEN return a dictionary with the merged keys and values from both dictionaries
    """
    test_dict_1 = {"key1": ["value1"], "key2": ["value2"]}
    test_dict_2 = {"key1": ["value3"], "key2": ["value4"]}

    assert merge_dictionaries(test_dict_1, test_dict_2) == {
        "key1": ["value1", "value3"],
        "key2": ["value2", "value4"],
    }


def test_merge_dictionaries_5():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN keys and values in both dictionaries are not unique
    THEN return a dictionary with the merged keys and values from both dictionaries
    """
    test_dict_1 = {"key1": ["a", "c"], "key2": ["a", "b"]}
    test_dict_2 = {"key1": ["a", "b"], "key2": ["a", "c"]}

    assert merge_dictionaries(test_dict_1, test_dict_2) == {
        "key1": ["a", "b", "c"],
        "key2": ["a", "b", "c"],
    }


def test_merge_dictionaries_6():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN one of the dictionaries is empty
    THEN return a copy of the other dictionary
    """
    test_dict_1 = {"key1": ["a", "c"], "key2": ["a", "b"]}
    test_dict_2 = {}

    assert merge_dictionaries(test_dict_1, test_dict_2) == {"key1": ["a", "c"], "key2": ["a", "b"]}

    test_dict_1 = {}
    test_dict_2 = {"key1": ["a", "c"], "key2": ["a", "b"]}
    assert merge_dictionaries(test_dict_1, test_dict_2) == {"key1": ["a", "c"], "key2": ["a", "b"]}


def test_merge_dictionaries_7():
    """Test merge_dictionaries() function.

    GIVEN two dictionaries supplied to the merge_dictionaries() function
    WHEN keys and values in both dictionaries are not unique
    THEN ensure the original dictionary objects are not modified
    """
    test_dict_1 = {"key1": ["a", "c"], "key2": ["a", "b"]}
    test_dict_2 = {"key1": ["a", "b"], "key2": ["a", "c"]}

    assert merge_dictionaries(test_dict_1, test_dict_2) == {
        "key1": ["a", "b", "c"],
        "key2": ["a", "b", "c"],
    }
    assert test_dict_1 == {"key1": ["a", "c"], "key2": ["a", "b"]}
    assert test_dict_2 == {"key1": ["a", "b"], "key2": ["a", "c"]}
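
A sketch of `merge_dictionaries` consistent with the seven tests: list values enforced, keys unioned, merged values deduplicated and sorted, and neither input mutated (the shipped version may differ in detail):

import copy


def merge_dictionaries(dict1: dict, dict2: dict) -> dict:
    """Merge two dictionaries of lists without mutating either argument."""
    for d in (dict1, dict2):
        for key, value in d.items():
            if not isinstance(value, list):
                raise TypeError(f"{key} is not a list")

    merged = copy.deepcopy(dict1)
    for key, values in dict2.items():
        merged[key] = sorted(set(merged.get(key, [])) | set(values))
    return merged
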
def test_rename_in_dict_1():
    """Test rename_in_dict() function.

    GIVEN a dictionary with values as a list
    WHEN the rename_in_dict() function is called with a key that does not exist
    THEN no keys should be renamed in the dictionary
    """
    test_dict = {"key1": ["value1"], "key2": ["value2", "value3"]}

    assert rename_in_dict(dictionary=test_dict, key="key4", value_1="key5") == test_dict


def test_rename_in_dict_2():
    """Test rename_in_dict() function.

    GIVEN a dictionary with values as a list
    WHEN the rename_in_dict() function is called with a key that exists and a new value for the key
    THEN the key should be renamed in the returned dictionary and the original dictionary should not be modified
    """
    test_dict = {"key1": ["value1"], "key2": ["value2", "value3"]}

    assert rename_in_dict(dictionary=test_dict, key="key2", value_1="new_key") == {
        "key1": ["value1"],
        "new_key": ["value2", "value3"],
    }
    assert test_dict == {"key1": ["value1"], "key2": ["value2", "value3"]}


def test_rename_in_dict_3():
    """Test rename_in_dict() function.

    GIVEN a dictionary with values as a list
    WHEN the rename_in_dict() function is called with a key that exists and a value that does not exist
    THEN the dictionary should not be modified
    """
    test_dict = {"key1": ["value1"], "key2": ["value2", "value3"]}

    assert (
        rename_in_dict(dictionary=test_dict, key="key2", value_1="no_value", value_2="new_value")
        == test_dict
    )
    assert "```bash" not in result
    assert "`dolor sit`" not in result
    assert "---\nkey: value" not in result
    assert "`" not in result

    result = remove_markdown_sections(text)
    assert "```bash" in result
    assert "`dolor sit`" in result
    assert "---\nkey: value" in result
    assert "`" in result


def test_clean_dictionary():
    """Test cleaning a dictionary."""
    dictionary = {" *key* ": ["**value**", "[[value2]]", "#value3"]}
def test_rename_in_dict_4():
    """Test rename_in_dict() function.

    new_dict = clean_dictionary(dictionary)
    assert new_dict == {"key": ["value", "value2", "value3"]}
    GIVEN a dictionary with values as a list
    WHEN the rename_in_dict() function is called with a key that exists and a new value for a value
    THEN update the specified value in the dictionary
    """
    test_dict = {"key1": ["value1"], "key2": ["value2", "value3"]}

    assert rename_in_dict(
        dictionary=test_dict, key="key2", value_1="value2", value_2="new_value"
    ) == {"key1": ["value1"], "key2": ["new_value", "value3"]}


def test_rename_in_dict_5():
    """Test rename_in_dict() function.

    GIVEN a dictionary with values as a list
    WHEN the rename_in_dict() function is called with a key that exists and an existing value as the renamed value
    THEN only one instance of the new value should be in the key
    """
    test_dict = {"key1": ["value1"], "key2": ["value2", "value3"]}

    assert rename_in_dict(dictionary=test_dict, key="key2", value_1="value2", value_2="value3") == {
        "key1": ["value1"],
        "key2": ["value3"],
    }
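
A matching sketch of `rename_in_dict`: copy the input, rename the key when no `value_2` is given, otherwise rename one value and deduplicate (again an assumption, not the repository's exact code):

def rename_in_dict(dictionary: dict, key: str, value_1: str, value_2: str | None = None) -> dict:
    """Rename a key, or one of a key's values, in a copy of the dictionary."""
    new_dict = {k: list(v) for k, v in dictionary.items()}
    if key not in new_dict:
        return new_dict
    if value_2 is None:
        new_dict[value_1] = new_dict.pop(key)  # rename the key itself
    elif value_1 in new_dict[key]:
        # Replace value_1 with value_2; the set comprehension deduplicates.
        new_dict[key] = sorted({value_2 if v == value_1 else v for v in new_dict[key]})
    return new_dict
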
def test_validate_csv_bulk_imports_1(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a csv file missing the `path` column
    WHEN the validate_csv_bulk_imports function is called
    THEN an exception should be raised
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
PATH,type,key,value
note1.md,frontmatter,key,value"""
    csv_path.write_text(csv_content)

    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=[])


def test_validate_csv_bulk_imports_2(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a csv file missing the `type` column
    WHEN the validate_csv_bulk_imports function is called
    THEN an exception should be raised
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
path,Type,key,value
note1.md,frontmatter,key,value"""
    csv_path.write_text(csv_content)

    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=[])


def test_validate_csv_bulk_imports_3(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a csv file missing the `key` column
    WHEN the validate_csv_bulk_imports function is called
    THEN an exception should be raised
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
path,type,value
note1.md,frontmatter,key,value"""
    csv_path.write_text(csv_content)

    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=[])


def test_validate_csv_bulk_imports_4(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a csv file missing the `value` column
    WHEN the validate_csv_bulk_imports function is called
    THEN an exception should be raised
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
path,type,key,values
note1.md,frontmatter,key,value"""
    csv_path.write_text(csv_content)

    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=[])


def test_validate_csv_bulk_imports_5(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a csv file with only headers
    WHEN the validate_csv_bulk_imports function is called
    THEN an exception should be raised
    """
    csv_path = tmp_path / "test.csv"
    csv_content = "path,type,key,value"
    csv_path.write_text(csv_content)

    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=[])


def test_validate_csv_bulk_imports_6(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a valid csv file
    WHEN a path is given that does not exist in the vault
    THEN an exception should be raised
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
path,type,key,value
note1.md,frontmatter,key,value
note1.md,tag,key,value
note1.md,inline_metadata,key,value
note1.md,inline_metadata,key2,value
note1.md,inline_metadata,key2,value2
note2.md,frontmatter,key,value
note2.md,tag,key,value
note2.md,inline_metadata,key,value
note2.md,inline_metadata,key2,value
note2.md,inline_metadata,key2,value2
"""
    csv_path.write_text(csv_content)

    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=["note1.md"])


def test_validate_csv_bulk_imports_7(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a valid csv file
    WHEN a type is not 'frontmatter', 'inline_metadata', or 'tag'
    THEN exit the program
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
path,type,key,value
note1.md,frontmatter,key,value
note2.md,notvalid,key,value
"""
    csv_path.write_text(csv_content)
    with pytest.raises(typer.BadParameter):
        validate_csv_bulk_imports(csv_path=csv_path, note_paths=["note1.md", "note2.md"])


def test_validate_csv_bulk_imports_8(tmp_path):
    """Test the validate_csv_bulk_imports function.

    GIVEN a valid csv file
    WHEN more than one row has the same path
    THEN add the row to the list of rows for that path
    """
    csv_path = tmp_path / "test.csv"
    csv_content = """\
path,type,key,value
note1.md,frontmatter,key,value
note1.md,tag,key,value
note1.md,inline_metadata,key,value
note1.md,inline_metadata,key2,value
note1.md,inline_metadata,key2,value2
note2.md,frontmatter,key,value
note2.md,tag,key,value
note2.md,inline_metadata,key,value
note2.md,inline_metadata,key2,value
note2.md,inline_metadata,key2,value2
"""
    csv_path.write_text(csv_content)
    csv_dict = validate_csv_bulk_imports(csv_path=csv_path, note_paths=["note1.md", "note2.md"])
    assert csv_dict == {
        "note1.md": [
            {"key": "key", "type": "frontmatter", "value": "value"},
            {"key": "key", "type": "tag", "value": "value"},
            {"key": "key", "type": "inline_metadata", "value": "value"},
            {"key": "key2", "type": "inline_metadata", "value": "value"},
            {"key": "key2", "type": "inline_metadata", "value": "value2"},
        ],
        "note2.md": [
            {"key": "key", "type": "frontmatter", "value": "value"},
            {"key": "key", "type": "tag", "value": "value"},
            {"key": "key", "type": "inline_metadata", "value": "value"},
            {"key": "key2", "type": "inline_metadata", "value": "value"},
            {"key": "key2", "type": "inline_metadata", "value": "value2"},
        ],
    }
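
Between them, the eight tests specify the whole CSV contract: an exact `path,type,key,value` header, at least one data row, known paths, and a closed set of types. A sketch implementation under those assumptions:

import csv

import typer


def validate_csv_bulk_imports(csv_path, note_paths: list[str]) -> dict[str, list[dict]]:
    """Validate the bulk-import CSV and group its rows by note path."""
    csv_dict: dict[str, list[dict]] = {}
    with open(csv_path, newline="") as f:
        reader = csv.DictReader(f)
        if reader.fieldnames != ["path", "type", "key", "value"]:
            raise typer.BadParameter("CSV must have the columns: path, type, key, value")
        for row in reader:
            if row["type"] not in {"frontmatter", "inline_metadata", "tag"}:
                raise typer.BadParameter(f"'{row['type']}' is not a valid type")
            if row["path"] not in note_paths:
                raise typer.BadParameter(f"'{row['path']}' does not exist in the vault")
            csv_dict.setdefault(row["path"], []).append(
                {"type": row["type"], "key": row["key"], "value": row["value"]}
            )
    if not csv_dict:
        raise typer.BadParameter("CSV file is empty")
    return csv_dict
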
@@ -1,91 +1,129 @@
# type: ignore
"""Tests for the Vault module."""

import re
from pathlib import Path

import pytest
import typer

from obsidian_metadata._config import Config
from obsidian_metadata.models import Vault
from tests.helpers import Regex
from obsidian_metadata._utils.console import console
from obsidian_metadata.models import Vault, VaultFilter
from obsidian_metadata.models.enums import InsertLocation, MetadataType
from tests.helpers import Regex, strip_ansi
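
`strip_ansi`, newly imported from tests/helpers.py here, is not shown in this diff. The conventional implementation, which the console-output assertions in this file presumably rely on, removes ANSI escape sequences before comparing strings:

import re

ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")


def strip_ansi(text: str) -> str:
    """Remove ANSI escape sequences (colors, cursor moves) from a string."""
    return ANSI_ESCAPE.sub("", text)
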
def test_vault_creation(test_vault):
    """Test creating a Vault object."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)
def test_vault_creation(test_vault, tmp_path):
    """Test creating a Vault object.

    assert vault.vault_path == vault_path
    assert vault.backup_path == Path(f"{vault_path}.bak")
    GIVEN a Config object
    WHEN a Vault object is created
    THEN the Vault object is created with the correct attributes.
    """
    vault = Vault(config=test_vault)

    assert vault.name == "vault"
    assert vault.insert_location == InsertLocation.TOP
    assert vault.backup_path == Path(tmp_path, "vault.bak")
    assert vault.dry_run is False
    assert str(vault.exclude_paths[0]) == Regex(r".*\.git")
    assert vault.num_notes() == 3

    assert vault.metadata.dict == {
        "Inline Tags": [
            "ignored_file_tag2",
            "inline_tag_bottom1",
            "inline_tag_bottom2",
            "inline_tag_top1",
            "inline_tag_top2",
            "intext_tag1",
            "intext_tag2",
            "shared_tag",
        ],
        "author": ["author name"],
        "bottom_key1": ["bottom_key1_value"],
        "bottom_key2": ["bottom_key2_value"],
    assert len(vault.all_notes) == 2
    assert vault.frontmatter == {
        "date_created": ["2022-12-22"],
        "emoji_📅_key": ["emoji_📅_key_value"],
        "frontmatter_Key1": ["author name"],
        "frontmatter_Key2": ["article", "note"],
        "ignored_frontmatter": ["ignore_me"],
        "intext_key": ["intext_value"],
        "shared_key1": ["shared_key1_value"],
        "shared_key2": ["shared_key2_value1", "shared_key2_value2"],
        "tags": [
            "frontmatter_tag1",
            "frontmatter_tag2",
            "frontmatter_tag3",
            "ignored_file_tag1",
            "shared_tag",
            "📅/frontmatter_tag3",
        "french1": [
            "Voix ambiguë d'un cœur qui, au zéphyr, préfère les jattes de kiwis",
        ],
        "top_key1": ["top_key1_value"],
        "top_key2": ["top_key2_value"],
        "top_key3": ["top_key3_value_as_link"],
        "type": ["article", "note"],
        "frontmatter1": ["foo"],
        "frontmatter2": ["bar", "baz", "qux"],
        "tags": ["bar", "foo"],
        "🌱": ["🌿"],
    }
    assert vault.inline_meta == {
        "french2": [
            "Voix ambiguë d'un cœur qui, au zéphyr, préfère les jattes de kiwis.",
        ],
        "inline1": ["bar baz", "foo"],
        "inline2": ["[[foo]]"],
        "inline3": ["value"],
        "inline4": ["foo"],
        "inline5": [],
        "intext1": ["foo"],
        "intext2": ["foo"],
        "key with space": ["foo"],
        "🌱": ["🌿"],
    }
    assert vault.tags == ["tag1", "tag2"]
    assert vault.exclude_paths == [
        tmp_path / "vault" / ".git",
        tmp_path / "vault" / ".obsidian",
        tmp_path / "vault" / "ignore_folder",
    ]
    assert vault.filters == []
    assert len(vault.all_note_paths) == 2
    assert len(vault.notes_in_scope) == 2


def test_get_filtered_notes(sample_vault) -> None:
    """Test filtering notes."""
    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config, path_filter="front")
def set_insert_location(test_vault):
    """Test setting a new insert location.

    assert vault.num_notes() == 4
    GIVEN a vault object
    WHEN the insert location is changed
    THEN the insert location is changed
    """
    vault = Vault(config=test_vault)

    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault2 = Vault(config=vault_config, path_filter="mixed")

    assert vault2.num_notes() == 1
    assert vault.name == "vault"
    assert vault.insert_location == InsertLocation.TOP
    vault.insert_location = InsertLocation.BOTTOM
    assert vault.insert_location == InsertLocation.BOTTOM


def test_backup(test_vault, capsys):
    """Test backing up the vault."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)
@pytest.mark.parametrize(
    ("meta_type", "key", "value", "expected"),
    [
        (MetadataType.FRONTMATTER, "new_key", "new_value", 2),
        (MetadataType.FRONTMATTER, "frontmatter1", "new_value", 2),
        (MetadataType.INLINE, "new_key", "new_value", 2),
        (MetadataType.INLINE, "inline5", "new_value", 2),
        (MetadataType.INLINE, "inline1", "foo", 1),
        (MetadataType.TAGS, None, "new_value", 2),
        (MetadataType.TAGS, None, "tag1", 1),
    ],
)
def test_add_metadata(test_vault, meta_type, key, value, expected):
    """Test add_metadata method.

    GIVEN a vault object
    WHEN metadata is added
    THEN add the metadata and return the number of notes updated
    """
    vault = Vault(config=test_vault)
    assert vault.add_metadata(meta_type, key, value) == expected

    if meta_type == MetadataType.FRONTMATTER:
        assert value in vault.frontmatter[key]

    if meta_type == MetadataType.INLINE:
        assert value in vault.inline_meta[key]

    if meta_type == MetadataType.TAGS:
        assert value in vault.tags


def test_backup_1(test_vault, capsys):
|
||||
"""Test the backup method.
|
||||
|
||||
GIVEN a vault object
|
||||
WHEN the backup method is called
|
||||
THEN the vault is backed up
|
||||
"""
|
||||
vault = Vault(config=test_vault)
|
||||
|
||||
vault.backup()
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert Path(f"{vault_path}.bak").exists() is True
|
||||
assert vault.backup_path.exists() is True
|
||||
assert captured.out == Regex(r"SUCCESS +| backed up to")
|
||||
|
||||
vault.info()
|
||||
@@ -94,14 +132,15 @@ def test_backup(test_vault, capsys):
|
||||
assert captured.out == Regex(r"Backup path +\│[\s ]+/[\d\w]+")
|
||||
|
||||
|
||||
def test_backup_dryrun(test_vault, capsys):
    """Test backing up the vault."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config, dry_run=True)
def test_backup_2(test_vault, capsys):
    """Test the backup method.

    GIVEN a vault object
    WHEN dry_run is set to True and the backup method is called
    THEN the vault is not backed up
    """
    vault = Vault(config=test_vault, dry_run=True)

    print(f"vault.dry_run: {vault.dry_run}")
    vault.backup()

    captured = capsys.readouterr()
@@ -109,12 +148,100 @@ def test_backup_dryrun(test_vault, capsys):
    assert captured.out == Regex(r"DRYRUN +| Backup up vault to")

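# A sketch of the backup behavior test_backup_1 and test_backup_2 constrain:
# the backup lands next to the vault as "<name>.bak", and a dry run only
# reports what would happen. All names here are assumptions for illustration.
import shutil
from pathlib import Path


def backup_vault(vault_path: Path, dry_run: bool = False) -> Path:
    backup_path = vault_path.with_name(vault_path.name + ".bak")
    if dry_run:
        print(f"DRYRUN | Backup up vault to {backup_path}")
        return backup_path
    if backup_path.exists():
        shutil.rmtree(backup_path)  # replace any stale backup
    shutil.copytree(vault_path, backup_path)
    print(f"SUCCESS | backed up to {backup_path}")
    return backup_path
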
def test_delete_backup(test_vault, capsys):
    """Test deleting the vault backup."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)
@pytest.mark.parametrize(
    ("meta_type", "key", "value", "is_regex", "expected"),
    [
        (MetadataType.FRONTMATTER, "frontmatter1", None, False, True),
        (MetadataType.FRONTMATTER, "frontmatter1", "foo", False, True),
        (MetadataType.FRONTMATTER, "no_key", None, False, False),
        (MetadataType.FRONTMATTER, "frontmatter1", "no_value", False, False),
        (MetadataType.FRONTMATTER, r"f\w+\d", None, True, True),
        (MetadataType.FRONTMATTER, r"f\w+\d", r"\w+", True, True),
        (MetadataType.FRONTMATTER, r"^\d+", None, True, False),
        (MetadataType.FRONTMATTER, r"frontmatter1", r"^\d+", True, False),
        (MetadataType.INLINE, "intext1", None, False, True),
        (MetadataType.INLINE, "intext1", "foo", False, True),
        (MetadataType.INLINE, "no_key", None, False, False),
        (MetadataType.INLINE, "intext1", "no_value", False, False),
        (MetadataType.INLINE, r"i\w+\d", None, True, True),
        (MetadataType.INLINE, r"i\w+\d", r"\w+", True, True),
        (MetadataType.INLINE, r"^\d+", None, True, False),
        (MetadataType.INLINE, r"intext1", r"^\d+", True, False),
        (MetadataType.TAGS, None, "tag1", False, True),
        (MetadataType.TAGS, None, "no tag", False, False),
        (MetadataType.TAGS, None, r"^\w+\d", True, True),
        (MetadataType.TAGS, None, r"^\d", True, False),
        ##############
        (MetadataType.META, "frontmatter1", None, False, True),
        (MetadataType.META, "frontmatter1", "foo", False, True),
        (MetadataType.META, "no_key", None, False, False),
        (MetadataType.META, "frontmatter1", "no_value", False, False),
        (MetadataType.META, r"f\w+\d", None, True, True),
        (MetadataType.META, r"f\w+\d", r"\w+", True, True),
        (MetadataType.META, r"^\d+", None, True, False),
        (MetadataType.META, r"frontmatter1", r"^\d+", True, False),
        (MetadataType.META, r"i\w+\d", None, True, True),
        (MetadataType.ALL, None, "tag1", False, True),
        (MetadataType.ALL, None, "no tag", False, False),
        (MetadataType.ALL, None, r"^\w+\d", True, True),
        (MetadataType.ALL, None, r"^\d", True, False),
        (MetadataType.ALL, "frontmatter1", "foo", False, True),
        (MetadataType.ALL, r"i\w+\d", None, True, True),
    ],
)
def test_contains_metadata(test_vault, meta_type, key, value, is_regex, expected):
    """Test the contains_metadata method.

    GIVEN a vault object
    WHEN the contains_metadata method is called
    THEN the method returns True if the metadata is found
    """
    vault = Vault(config=test_vault)
    assert vault.contains_metadata(meta_type, key, value, is_regex) == expected

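# A sketch of the matching rules the contains_metadata cases above encode:
# exact comparison when is_regex is False, re.search() semantics when True,
# applied to a dict-of-lists metadata store. Assumed semantics, for illustration.
import re


def contains(meta: dict[str, list[str]], key: str, value: str | None, is_regex: bool) -> bool:
    key_pat = key if is_regex else re.escape(key)
    for k, values in meta.items():
        if not re.search(key_pat, k):
            continue
        if value is None:
            return True  # a matching key alone is enough
        value_pat = value if is_regex else re.escape(value)
        if any(re.search(value_pat, v) for v in values):
            return True
    return False


meta = {"frontmatter1": ["foo"], "intext1": ["foo"]}
assert contains(meta, "frontmatter1", None, is_regex=False) is True
assert contains(meta, r"^\d+", None, is_regex=True) is False
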
def test_commit_changes_1(test_vault, tmp_path):
    """Test committing changes to content in the vault.

    GIVEN a vault object
    WHEN the commit_changes method is called
    THEN the changes are committed to the vault
    """
    vault = Vault(config=test_vault)

    content = Path(f"{tmp_path}/vault/sample_note.md").read_text()
    assert "new_key: new_key_value" not in content
    vault.add_metadata(MetadataType.FRONTMATTER, "new_key", "new_key_value")
    vault.commit_changes()
    committed_content = Path(f"{tmp_path}/vault/sample_note.md").read_text()
    assert "new_key: new_key_value" in committed_content


def test_commit_changes_2(test_vault, tmp_path):
    """Test committing changes to content in the vault in dry run mode.

    GIVEN a vault object
    WHEN dry_run is set to True
    THEN no changes are committed to the vault
    """
    vault = Vault(config=test_vault, dry_run=True)
    content = Path(f"{tmp_path}/vault/sample_note.md").read_text()
    assert "new_key: new_key_value" not in content

    vault.add_metadata(MetadataType.FRONTMATTER, "new_key", "new_key_value")
    vault.commit_changes()
    committed_content = Path(f"{tmp_path}/vault/sample_note.md").read_text()
    assert "new_key: new_key_value" not in committed_content

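# A sketch of what commit_changes() must do for the two tests above to pass:
# write each changed note back to disk, unless the vault was created with
# dry_run=True. Assumed shape, not the project's implementation.
from pathlib import Path


def commit_changes(changed_notes: dict[Path, str], dry_run: bool = False) -> None:
    if dry_run:
        return  # report-only mode: leave every file on disk untouched
    for note_path, new_content in changed_notes.items():
        note_path.write_text(new_content)
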
def test_delete_backup_1(test_vault, capsys):
    """Test deleting the vault backup.

    GIVEN a vault object
    WHEN the delete_backup method is called
    THEN the backup is deleted
    """
    vault = Vault(config=test_vault)

    vault.backup()
    vault.delete_backup()
@@ -129,12 +256,14 @@ def test_delete_backup(test_vault, capsys):
    assert captured.out == Regex(r"Backup +\│ None")

def test_delete_backup_dryrun(test_vault, capsys):
    """Test deleting the vault backup."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config, dry_run=True)
def test_delete_backup_2(test_vault, capsys):
    """Test delete_backup method in dry run mode.

    GIVEN a vault object
    WHEN the dry_run is True and the delete_backup method is called
    THEN the backup is not deleted
    """
    vault = Vault(config=test_vault, dry_run=True)

    Path.mkdir(vault.backup_path)
    vault.delete_backup()
@@ -144,130 +273,492 @@ def test_delete_backup_dryrun(test_vault, capsys):
    assert vault.backup_path.exists() is True

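# A sketch of delete_backup() as the two tests above constrain it: remove the
# backup directory unless dry_run is set, in which case it must stay in place.
# Assumed shape, for illustration only.
import shutil
from pathlib import Path


def delete_backup(backup_path: Path, dry_run: bool = False) -> None:
    if dry_run:
        print(f"DRYRUN | Delete backup {backup_path}")
        return
    if backup_path.exists():
        shutil.rmtree(backup_path)
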
def test_info(test_vault, capsys):
    """Test printing vault information."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
@pytest.mark.parametrize(
    ("tag_to_delete", "expected"),
    [
        ("tag1", 1),
        ("tag2", 1),
        ("tag3", 0),
    ],
)
def test_delete_tag(test_vault, tag_to_delete, expected):
    """Test delete_tag method.

    GIVEN a vault object
    WHEN the delete_tag method is called
    THEN delete tags if found and return the number of notes updated
    """
    vault = Vault(config=test_vault)

    assert vault.delete_tag(tag_to_delete) == expected
    assert tag_to_delete not in vault.tags

@pytest.mark.parametrize(
    ("meta_type", "key_to_delete", "value_to_delete", "expected"),
    [
        (MetadataType.FRONTMATTER, "frontmatter1", "foo", 1),
        (MetadataType.FRONTMATTER, "frontmatter1", None, 1),
        (MetadataType.FRONTMATTER, "frontmatter1", "bar", 0),
        (MetadataType.FRONTMATTER, "frontmatter2", "bar", 1),
        (MetadataType.META, "frontmatter1", "foo", 1),
        (MetadataType.INLINE, "frontmatter1", "foo", 0),
        (MetadataType.INLINE, "inline1", "foo", 1),
        (MetadataType.INLINE, "inline1", None, 1),
    ],
)
def test_delete_metadata(test_vault, meta_type, key_to_delete, value_to_delete, expected):
    """Test delete_metadata method.

    GIVEN a vault object
    WHEN the delete_metadata method is called
    THEN delete metadata if found and return the number of notes updated
    """
    vault = Vault(config=test_vault)
    assert (
        vault.delete_metadata(meta_type=meta_type, key=key_to_delete, value=value_to_delete)
        == expected
    )

    if meta_type == MetadataType.FRONTMATTER or meta_type == MetadataType.META:
        if value_to_delete is None:
            assert key_to_delete not in vault.frontmatter
        elif key_to_delete in vault.frontmatter:
            assert value_to_delete not in vault.frontmatter[key_to_delete]

    if meta_type == MetadataType.INLINE or meta_type == MetadataType.META:
        if value_to_delete is None:
            assert key_to_delete not in vault.inline_meta
        elif key_to_delete in vault.inline_meta:
            assert value_to_delete not in vault.inline_meta[key_to_delete]

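# A sketch of the per-note deletion semantics pinned down above: value=None
# drops the whole key, otherwise only the matching value is removed, and the
# caller counts how many notes actually changed. Assumed semantics.
def delete_metadata(meta: dict[str, list[str]], key: str, value: str | None = None) -> bool:
    """Return True if this note's metadata changed."""
    if key not in meta:
        return False
    if value is None:
        del meta[key]
        return True
    if value in meta[key]:
        meta[key].remove(value)
        return True
    return False


meta = {"frontmatter1": ["foo"], "frontmatter2": ["bar"]}
assert delete_metadata(meta, "frontmatter1", "foo") is True
assert meta["frontmatter1"] == []
assert delete_metadata(meta, "frontmatter1", "bar") is False
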
def test_export_csv_1(tmp_path, test_vault):
    """Test exporting the vault to a CSV file.

    GIVEN a vault object
    WHEN the export_metadata method is called with a path and export_format of csv
    THEN the vault metadata is exported to a CSV file
    """
    vault = Vault(config=test_vault)
    export_file = tmp_path / "export.csv"

    vault.export_metadata(path=export_file, export_format="csv")
    assert export_file.exists() is True
    result = export_file.read_text()
    assert "Metadata Type,Key,Value" in result
    assert "frontmatter,date_created,2022-12-22" in result
    assert "inline_metadata,🌱,🌿" in result
    assert "inline_metadata,inline5,\n" in result
    assert "tags,,tag1" in result

def test_export_csv_2(tmp_path, test_vault):
    """Test exporting the vault to a CSV file.

    GIVEN a vault object
    WHEN the export_metadata method is called with a path that does not exist and export_format of csv
    THEN an error is raised
    """
    vault = Vault(config=test_vault)
    export_file = tmp_path / "does_not_exist" / "export.csv"

    with pytest.raises(typer.Exit):
        vault.export_metadata(path=export_file, export_format="csv")
    assert export_file.exists() is False

def test_export_json(tmp_path, test_vault):
    """Test exporting the vault to a JSON file.

    GIVEN a vault object
    WHEN the export_metadata method is called with a path and export_format of json
    THEN the vault metadata is exported to a JSON file
    """
    vault = Vault(config=test_vault)
    export_file = tmp_path / "export.json"

    vault.export_metadata(path=export_file, export_format="json")
    assert export_file.exists() is True
    result = export_file.read_text()
    assert '"frontmatter": {' in result
    assert '"inline_metadata": {' in result
    assert '"tags": [' in result

def test_export_notes_to_csv_1(tmp_path, test_vault):
    """Test export_notes_to_csv() method.

    GIVEN a vault object
    WHEN the export_notes_to_csv method is called with a path
    THEN the notes are exported to a CSV file
    """
    vault = Vault(config=test_vault)
    export_file = tmp_path / "export.csv"
    vault.export_notes_to_csv(path=export_file)
    assert export_file.exists() is True
    result = export_file.read_text()
    assert "path,type,key,value" in result
    assert "sample_note.md,FRONTMATTER,date_created,2022-12-22" in result
    assert "sample_note.md,FRONTMATTER,🌱,🌿" in result
    assert "sample_note.md,INLINE,inline2,[[foo]]" in result
    assert "sample_note.md,INLINE,inline1,bar baz" in result
    assert "sample_note.md,TAGS,,tag1" in result
    assert "sample_note.md,INLINE,inline5,\n" in result


def test_export_notes_to_csv_2(test_vault):
    """Test export_notes_to_csv() method.

    GIVEN a vault object
    WHEN the export_notes_to_csv method is called with a path where the parent directory does not exist
    THEN an error is raised
    """
    vault = Vault(config=test_vault)
    export_file = Path("/I/do/not/exist/export.csv")
    with pytest.raises(typer.Exit):
        vault.export_notes_to_csv(path=export_file)

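# A sketch of the CSV shape the export tests above assert ("path,type,key,value"
# rows, an empty key column for tags, an error when the parent directory is
# missing). Column names come from the assertions; the rest is assumed.
import csv
import tempfile
from pathlib import Path


def export_notes_to_csv(rows: list[tuple[str, str, str, str]], path: Path) -> None:
    if not path.parent.exists():
        raise FileNotFoundError(path.parent)  # the real method exits via typer.Exit
    with path.open("w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["path", "type", "key", "value"])
        writer.writerows(rows)


out = Path(tempfile.gettempdir()) / "export.csv"
export_notes_to_csv([("sample_note.md", "TAGS", "", "tag1")], out)
assert "path,type,key,value" in out.read_text()
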
def test_get_filtered_notes_1(sample_vault) -> None:
    """Test filtering notes.

    GIVEN a vault object
    WHEN the get_filtered_notes method is called with a path filter
    THEN the notes in scope are filtered
    """
    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    filters = [VaultFilter(path_filter="front")]
    vault = Vault(config=vault_config, filters=filters)
    assert len(vault.all_notes) == 13
    assert len(vault.notes_in_scope) == 4

    filters = [VaultFilter(path_filter="mixed")]
    vault = Vault(config=vault_config, filters=filters)
    assert len(vault.all_notes) == 13
    assert len(vault.notes_in_scope) == 1


def test_get_filtered_notes_2(sample_vault) -> None:
    """Test filtering notes.

    GIVEN a vault object
    WHEN the get_filtered_notes method is called with a key filter
    THEN the notes in scope are filtered
    """
    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]

    filters = [VaultFilter(key_filter="on_one_note")]
    vault = Vault(config=vault_config, filters=filters)
    assert len(vault.all_notes) == 13
    assert len(vault.notes_in_scope) == 1


def test_get_filtered_notes_3(sample_vault) -> None:
    """Test filtering notes.

    GIVEN a vault object
    WHEN the get_filtered_notes method is called with a key and a value filter
    THEN the notes in scope are filtered
    """
    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    filters = [VaultFilter(key_filter="type", value_filter="book")]
    vault = Vault(config=vault_config, filters=filters)
    assert len(vault.all_notes) == 13
    assert len(vault.notes_in_scope) == 10


def test_get_filtered_notes_4(sample_vault) -> None:
    """Test filtering notes.

    GIVEN a vault object
    WHEN the get_filtered_notes method is called with a tag filter
    THEN the notes in scope are filtered
    """
    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    filters = [VaultFilter(tag_filter="brunch")]
    vault = Vault(config=vault_config, filters=filters)
    assert len(vault.all_notes) == 13
    assert len(vault.notes_in_scope) == 0


def test_get_filtered_notes_5(sample_vault) -> None:
    """Test filtering notes.

    GIVEN a vault object
    WHEN the get_filtered_notes method is called with a tag and a path filter
    THEN the notes in scope are filtered
    """
    vault_path = sample_vault
    config = Config(config_path="tests/fixtures/sample_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    filters = [VaultFilter(tag_filter="brunch"), VaultFilter(path_filter="inbox")]
    vault = Vault(config=vault_config, filters=filters)
    assert len(vault.all_notes) == 13
    assert len(vault.notes_in_scope) == 0

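# A sketch of how the VaultFilter cases above could compose: every filter must
# match for a note to stay in scope (the tag+path combination yielding zero
# notes suggests the filters are ANDed). Field names are assumptions.
from dataclasses import dataclass


@dataclass
class Note:
    path: str
    frontmatter: dict[str, list[str]]
    tags: list[str]


@dataclass
class VaultFilterSketch:
    path_filter: str | None = None
    key_filter: str | None = None
    value_filter: str | None = None
    tag_filter: str | None = None


def in_scope(note: Note, filters: list[VaultFilterSketch]) -> bool:
    for f in filters:
        if f.path_filter and f.path_filter not in note.path:
            return False
        if f.key_filter and f.key_filter not in note.frontmatter:
            return False
        if f.value_filter and f.value_filter not in note.frontmatter.get(f.key_filter, []):
            return False
        if f.tag_filter and f.tag_filter not in note.tags:
            return False
    return True


note = Note("inbox/recipe.md", {"type": ["book"]}, ["dinner"])
assert in_scope(note, [VaultFilterSketch(path_filter="inbox")]) is True
assert in_scope(note, [VaultFilterSketch(tag_filter="brunch"), VaultFilterSketch(path_filter="inbox")]) is False
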
def test_get_changed_notes(test_vault, tmp_path):
    """Test get_changed_notes() method.

    GIVEN a vault object
    WHEN the get_changed_notes method is called
    THEN the changed notes are returned
    """
    vault = Vault(config=test_vault)
    assert vault.get_changed_notes() == []
    vault.delete_metadata(key="frontmatter1", meta_type=MetadataType.FRONTMATTER)
    changed_notes = vault.get_changed_notes()
    assert len(changed_notes) == 1
    assert changed_notes[0].note_path == tmp_path / "vault" / "sample_note.md"

def test_info(test_vault, capsys):
    """Test info() method.

    GIVEN a vault object
    WHEN the info method is called
    THEN the vault info is printed
    """
    vault = Vault(config=test_vault)

    vault.info()

    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(r"Vault +\│ /[\d\w]+")
    assert captured == Regex(r"Notes in scope +\│ \d+")
    assert captured == Regex(r"Backup +\│ None")


def test_list_editable_notes(test_vault, capsys) -> None:
    """Test list_editable_notes() method.

    GIVEN a vault object
    WHEN the list_editable_notes() method is called
    THEN the editable notes in scope are printed
    """
    vault = Vault(config=test_vault)

    vault.list_editable_notes()
    captured = capsys.readouterr()
    assert captured.out == Regex(r"Vault +\│ /[\d\w]+")
    assert captured.out == Regex(r"Notes being edited +\│ \d+")
    assert captured.out == Regex(r"Backup +\│ None")
    assert captured.out == Regex("Notes in current scope")
    assert captured.out == Regex(r"\d +sample_note\.md")

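# The tests above run captured output through strip_ansi() before the Regex
# comparison. A minimal sketch of such a helper (assumed: it removes ANSI
# escape sequences so assertions see plain text):
import re

ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")


def strip_ansi_sketch(text: str) -> str:
    return ANSI_ESCAPE.sub("", text)


assert strip_ansi_sketch("\x1b[32mSUCCESS\x1b[0m") == "SUCCESS"
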
def test_contains_inline_tag(test_vault) -> None:
    """Test if the vault contains an inline tag."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    assert vault.contains_inline_tag("tag") is False
    assert vault.contains_inline_tag("intext_tag2") is True
def test_move_inline_metadata_1(test_vault) -> None:
    """Test move_inline_metadata() method.

    GIVEN a vault with inline metadata.
    WHEN the move_inline_metadata() method is called.
    THEN the inline metadata is moved to the top of the file.
    """
    vault = Vault(config=test_vault)

    assert vault.move_inline_metadata(location=InsertLocation.TOP) == 1

def test_contains_metadata(test_vault) -> None:
    """Test if the vault contains a metadata key."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    assert vault.contains_metadata("key") is False
    assert vault.contains_metadata("top_key1") is True
    assert vault.contains_metadata("top_key1", "no_value") is False
    assert vault.contains_metadata("top_key1", "top_key1_value") is True
@pytest.mark.parametrize(
    ("meta_type", "expected_regex"),
    [
        (
            MetadataType.ALL,
            r"All metadata.*Keys +┃ Values +┃.*frontmatter1 +│ foo.*inline1 +│ bar baz.*tags +│ bar.*All inline tags.*#tag1.*#tag2",
        ),
        (
            MetadataType.FRONTMATTER,
            r"All frontmatter.*Keys +┃ Values +┃.*frontmatter1 +│ foo.*tags +│ bar",
        ),
        (
            MetadataType.INLINE,
            r"All inline metadata.*Keys +┃ Values +┃.*inline2 +│ \[\[foo\]\]",
        ),
        (
            MetadataType.TAGS,
            r"All inline tags.*#tag1.*#tag2",
        ),
    ],
)
def test_print_metadata(test_vault, capsys, meta_type, expected_regex) -> None:
    """Test print_metadata() method.

    GIVEN a vault object
    WHEN the print_metadata() method is called
    THEN the metadata is printed
    """
    vault = Vault(config=test_vault)
    vault.print_metadata(meta_type=meta_type)
    captured = strip_ansi(capsys.readouterr().out)
    assert captured == Regex(expected_regex, re.DOTALL)

def test_delete_inline_tag(test_vault) -> None:
    """Test deleting an inline tag."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    assert vault.delete_inline_tag("no tag") == 0
    assert vault.delete_inline_tag("intext_tag2") == 2
    assert vault.metadata.dict["Inline Tags"] == [
        "ignored_file_tag2",
        "inline_tag_bottom1",
        "inline_tag_bottom2",
        "inline_tag_top1",
        "inline_tag_top2",
        "intext_tag1",
        "shared_tag",
    ]
def test_rename_tag_1(test_vault) -> None:
    """Test rename_tag() method.

    GIVEN a vault object
    WHEN the rename_tag() method is called with a tag that is found
    THEN the inline tag is renamed
    """
    vault = Vault(config=test_vault)

    assert vault.rename_tag("tag1", "new_tag") == 1
    assert "tag1" not in vault.tags
    assert "new_tag" in vault.tags

def test_delete_metadata(test_vault) -> None:
    """Test deleting a metadata key/value."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    assert vault.delete_metadata("no key") == 0
    assert vault.delete_metadata("top_key1", "no_value") == 0

    assert vault.delete_metadata("top_key1", "top_key1_value") == 2
    assert vault.metadata.dict["top_key1"] == []

    assert vault.delete_metadata("top_key2") == 2
    assert "top_key2" not in vault.metadata.dict
def test_rename_tag_2(test_vault) -> None:
    """Test rename_tag() method.

    GIVEN a vault object
    WHEN the rename_tag() method is called with a tag that is not found
    THEN the inline tag is not renamed
    """
    vault = Vault(config=test_vault)

    assert vault.rename_tag("no tag", "new_tag") == 0
    assert "new_tag" not in vault.tags

def test_rename_inline_tag(test_vault) -> None:
    """Test renaming an inline tag."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    assert vault.rename_inline_tag("no tag", "new_tag") == 0
    assert vault.rename_inline_tag("intext_tag2", "new_tag") == 2
    assert vault.metadata.dict["Inline Tags"] == [
        "ignored_file_tag2",
        "inline_tag_bottom1",
        "inline_tag_bottom2",
        "inline_tag_top1",
        "inline_tag_top2",
        "intext_tag1",
        "new_tag",
        "shared_tag",
    ]
@pytest.mark.parametrize(
    ("key", "value1", "value2", "expected"),
    [
        ("no key", "new_value", None, 0),
        ("frontmatter1", "no_value", "new_value", 0),
        ("frontmatter1", "foo", "new_value", 1),
        ("inline1", "foo", "new_value", 1),
        ("frontmatter1", "new_key", None, 1),
        ("inline1", "new_key", None, 1),
    ],
)
def test_rename_metadata(test_vault, key, value1, value2, expected) -> None:
    """Test rename_metadata() method.

    GIVEN a vault object
    WHEN the rename_metadata() method is called with a key or key/value that is found
    THEN the metadata is renamed
    """
    vault = Vault(config=test_vault)

    assert vault.rename_metadata(key, value1, value2) == expected

    if expected > 0 and value2 is None:
        assert key not in vault.frontmatter
        assert key not in vault.inline_meta

    if expected > 0 and value2:
        if key in vault.frontmatter:
            assert value1 not in vault.frontmatter[key]
            assert value2 in vault.frontmatter[key]
        if key in vault.inline_meta:
            assert value1 not in vault.inline_meta[key]
            assert value2 in vault.inline_meta[key]

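# A sketch of the two rename modes the parametrized test above distinguishes:
# value2=None renames the key itself (value1 is the new key name); otherwise
# value1 is replaced by value2 under the same key. Assumed semantics.
def rename_metadata(meta: dict[str, list[str]], key: str, value1: str, value2: str | None = None) -> bool:
    if key not in meta:
        return False
    if value2 is None:
        meta[value1] = meta.pop(key)  # rename the key, keep its values
        return True
    if value1 not in meta[key]:
        return False
    meta[key] = [value2 if v == value1 else v for v in meta[key]]
    return True


meta = {"frontmatter1": ["foo"]}
assert rename_metadata(meta, "frontmatter1", "foo", "new_value") is True
assert meta == {"frontmatter1": ["new_value"]}
assert rename_metadata(meta, "frontmatter1", "new_key") is True
assert meta == {"new_key": ["new_value"]}
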
def test_rename_metadata(test_vault) -> None:
    """Test renaming a metadata key/value."""
    vault_path = test_vault
    config = Config(config_path="tests/fixtures/test_vault_config.toml", vault_path=vault_path)
    vault_config = config.vaults[0]
    vault = Vault(config=vault_config)

    assert vault.rename_metadata("no key", "new_key") == 0
    assert vault.rename_metadata("tags", "nonexistent_value", "new_vaule") == 0

    assert vault.rename_metadata("tags", "frontmatter_tag1", "new_vaule") == 2
    assert vault.metadata.dict["tags"] == [
        "frontmatter_tag2",
        "frontmatter_tag3",
        "ignored_file_tag1",
        "new_vaule",
        "shared_tag",
        "📅/frontmatter_tag3",
    ]

    assert vault.rename_metadata("tags", "new_key") == 2
    assert "tags" not in vault.metadata.dict
    assert vault.metadata.dict["new_key"] == [
        "frontmatter_tag2",
        "frontmatter_tag3",
        "ignored_file_tag1",
        "new_vaule",
        "shared_tag",
        "📅/frontmatter_tag3",
    ]
@pytest.mark.parametrize(
    ("begin", "end", "key", "value", "expected"),
    [
        # no matches
        (MetadataType.INLINE, MetadataType.FRONTMATTER, "no key", None, 0),
        (MetadataType.INLINE, MetadataType.FRONTMATTER, "no key", "new_value", 0),
        (MetadataType.INLINE, MetadataType.FRONTMATTER, "inline1", "new_value", 0),
        (MetadataType.FRONTMATTER, MetadataType.INLINE, "no key", None, 0),
        (MetadataType.FRONTMATTER, MetadataType.INLINE, "no key", "new_value", 0),
        (MetadataType.FRONTMATTER, MetadataType.INLINE, "frontmatter1", "new_value", 0),
        # entire keys
        (MetadataType.FRONTMATTER, MetadataType.INLINE, "frontmatter1", None, 1),
        (MetadataType.FRONTMATTER, MetadataType.INLINE, "frontmatter2", None, 1),
        (MetadataType.INLINE, MetadataType.FRONTMATTER, "inline1", None, 1),
        # specific values
        (MetadataType.FRONTMATTER, MetadataType.INLINE, "frontmatter1", "foo", 1),
        (MetadataType.INLINE, MetadataType.FRONTMATTER, "inline1", "bar baz", 1),
        (MetadataType.INLINE, MetadataType.FRONTMATTER, "inline2", "[[foo]]", 1),
    ],
)
def test_transpose_metadata_1(test_vault, begin, end, key, value, expected) -> None:
    """Test transpose_metadata() method.

    GIVEN a vault object
    WHEN the transpose_metadata() method is called
    THEN the number of notes with transposed metadata is returned and the vault metadata is updated
    """
    vault = Vault(config=test_vault)

    assert vault.transpose_metadata(begin=begin, end=end, key=key, value=value) == expected

    if expected > 0:
        if begin == MetadataType.INLINE and value is None:
            assert key not in vault.inline_meta
            assert key in vault.frontmatter
        elif begin == MetadataType.FRONTMATTER and value is None:
            assert key not in vault.frontmatter
            assert key in vault.inline_meta
        elif begin == MetadataType.INLINE and value:
            assert value in vault.frontmatter[key]
        elif begin == MetadataType.FRONTMATTER and value:
            assert value in vault.inline_meta[key]

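# A sketch of transpose_metadata(): move a whole key, or a single value under
# a key, from one metadata store to the other (e.g. frontmatter -> inline).
# Assumed semantics inferred from the assertions above.
def transpose(src: dict[str, list[str]], dst: dict[str, list[str]], key: str, value: str | None = None) -> bool:
    if key not in src:
        return False
    if value is None:
        dst.setdefault(key, []).extend(src.pop(key))  # move the whole key
        return True
    if value not in src[key]:
        return False
    src[key].remove(value)
    dst.setdefault(key, []).append(value)
    return True


frontmatter: dict[str, list[str]] = {"frontmatter1": ["foo"]}
inline: dict[str, list[str]] = {}
assert transpose(frontmatter, inline, "frontmatter1") is True
assert "frontmatter1" not in frontmatter
assert inline["frontmatter1"] == ["foo"]
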
def test_update_from_dict_1(test_vault):
    """Test update_from_dict() method.

    GIVEN a vault object and an update dictionary
    WHEN no dictionary keys match paths in the vault
    THEN no notes are updated and 0 is returned
    """
    update_dict = {
        "path1": {"type": "frontmatter", "key": "new_key", "value": "new_value"},
        "path2": {"type": "frontmatter", "key": "new_key", "value": "new_value"},
    }
    vault = Vault(config=test_vault)

    assert vault.update_from_dict(update_dict) == 0
    assert vault.get_changed_notes() == []


def test_update_from_dict_2(test_vault):
    """Test update_from_dict() method.

    GIVEN a vault object and an update dictionary
    WHEN the dictionary is empty
    THEN no notes are updated and 0 is returned
    """
    vault = Vault(config=test_vault)
    update_dict = {}

    assert vault.update_from_dict(update_dict) == 0
    assert vault.get_changed_notes() == []


def test_update_from_dict_3(test_vault):
    """Test update_from_dict() method.

    GIVEN a vault object and an update dictionary
    WHEN a dictionary key matches a path in the vault
    THEN the note is updated to match the dictionary values
    """
    vault = Vault(config=test_vault)

    update_dict = {
        "sample_note.md": [
            {"type": "frontmatter", "key": "new_key", "value": "new_value"},
            {"type": "inline_metadata", "key": "new_key2", "value": "new_value"},
            {"type": "tag", "key": "", "value": "new_tag"},
        ]
    }
    assert vault.update_from_dict(update_dict) == 1

    note = vault.get_changed_notes()[0]

    assert note.note_path.name == "sample_note.md"
    assert len(note.metadata) == 3
    assert vault.frontmatter == {"new_key": ["new_value"]}
    assert vault.inline_meta == {"new_key2": ["new_value"]}
    assert vault.tags == ["new_tag"]

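# A sketch of update_from_dict() as the three tests above constrain it: the
# dictionary is keyed by note paths relative to the vault, matching notes have
# their metadata replaced by the listed rows, and the return value counts the
# notes that matched. Assumed shape, for illustration.
def update_from_dict(note_paths: list[str], update_dict: dict[str, list[dict[str, str]]]) -> int:
    updated = 0
    for path in note_paths:
        if path in update_dict:
            rows = update_dict[path]
            print(f"replacing metadata of {path} with {len(rows)} rows")
            updated += 1
    return updated


assert update_from_dict(["sample_note.md"], {}) == 0
assert update_from_dict(["sample_note.md"], {"path1": []}) == 0
assert update_from_dict(["sample_note.md"], {"sample_note.md": [{"type": "tag", "key": "", "value": "new_tag"}]}) == 1
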