diff --git a/.gitignore b/.gitignore
index 464ec3d..33c4a47 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,43 @@
+# Docusaurus build artifacts
+website/build/
+website/.docusaurus/
+website/docs/
+website/node_modules/
+
+# Dependencies
+node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+package-lock.json
+yarn.lock
+
+# Environment variables
+.env
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+
+# OS generated files
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Editor files
+*.swp
+*.swo
+*~
+.vscode/
+.idea/
+
+# Spell check artifacts
*.dic
+
+# Build outputs
+/build
+/dist
diff --git a/.wordlist.txt b/.wordlist.txt
index cc5c5df..294d22c 100644
--- a/.wordlist.txt
+++ b/.wordlist.txt
@@ -752,3 +752,11 @@ FDB
xlarge
SaaS
GCP's
+Docusaurus
+MDX
+admonition
+admonitions
+gitignore
+webpack
+kramdown
+JSX
diff --git a/add_h1_headings.py b/add_h1_headings.py
deleted file mode 100644
index e231e71..0000000
--- a/add_h1_headings.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python3
-"""Add H1 headings to all markdown files missing them."""
-
-import os
-import re
-from pathlib import Path
-
-def has_h1_heading(content_after_frontmatter):
- """Check if content has an H1 heading."""
- lines = content_after_frontmatter.strip().split('\n')
- for line in lines[:10]: # Check first 10 lines
- if line.strip().startswith('# '):
- return True
- return False
-
-def extract_title_from_frontmatter(content):
- """Extract title from YAML frontmatter."""
- match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL | re.MULTILINE)
- if match:
- frontmatter = match.group(1)
- title_match = re.search(r'^title:\s*(.+)$', frontmatter, re.MULTILINE)
- if title_match:
- title = title_match.group(1).strip()
- # Remove quotes if present
- title = title.strip('"').strip("'")
- return title
- return None
-
-def add_h1_heading(filepath):
- """Add H1 heading to a file if it's missing."""
- with open(filepath, 'r', encoding='utf-8') as f:
- content = f.read()
-
- # Check if file has frontmatter
- if not content.startswith('---'):
- return False
-
- # Extract title
- title = extract_title_from_frontmatter(content)
- if not title:
- return False
-
- # Split at end of frontmatter
- match = re.search(r'^---\s*\n.*?\n---\s*\n', content, re.DOTALL | re.MULTILINE)
- if not match:
- return False
-
- frontmatter_end = match.end()
- frontmatter = content[:frontmatter_end]
- after_frontmatter = content[frontmatter_end:]
-
- # Check if H1 already exists
- if has_h1_heading(after_frontmatter):
- return False
-
- # Add H1 heading
- new_content = f"{frontmatter}\n# {title}\n{after_frontmatter}"
-
- with open(filepath, 'w', encoding='utf-8') as f:
- f.write(new_content)
-
- return True
-
-def main():
- """Process all markdown files in website/docs."""
- docs_dir = Path('/Users/danshalev/docs-staging/website/docs')
-
- fixed_files = []
-
- for md_file in docs_dir.rglob('*.md'):
- if add_h1_heading(md_file):
- rel_path = md_file.relative_to(docs_dir)
- fixed_files.append(str(rel_path))
- print(f"✓ Added H1 to: {rel_path}")
-
- print(f"\n{'='*60}")
- print(f"Fixed {len(fixed_files)} files")
- print(f"{'='*60}")
-
-if __name__ == '__main__':
- main()
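The deleted helper above enforced one structural pattern: every page with a frontmatter `title:` also carries a matching H1 directly after the frontmatter. A minimal sketch of the shape it produced (the page title here is a placeholder, not a file from this repo):

```markdown
---
title: Example Page
---

# Example Page

Page body continues here.
```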
diff --git a/agentic-memory/graphiti-mcp-server.md b/agentic-memory/graphiti-mcp-server.md
index 68c9337..2c889a0 100644
--- a/agentic-memory/graphiti-mcp-server.md
+++ b/agentic-memory/graphiti-mcp-server.md
@@ -359,8 +359,11 @@ The Graphiti MCP server stores information in FalkorDB using the following schem
### Programmatic Access
-{: .warning }
-> **Important**: The Graphiti MCP server is designed to be used by MCP clients (like Claude Desktop or Cursor) via the HTTP transport protocol. It does **not** expose direct REST API endpoints outside of the MCP protocol.
+:::warning Important
+
+The Graphiti MCP server is designed to be used by MCP clients (like Claude Desktop or Cursor) via the HTTP transport protocol. It does **not** expose direct REST API endpoints outside of the MCP protocol.
+
+:::
The server exposes:
- `/mcp/` - HTTP MCP protocol endpoint
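The hunk above is one instance of the admonition conversion applied throughout this PR: the kramdown attribute-list-plus-blockquote callout becomes a Docusaurus admonition container. A condensed before/after sketch of the pattern (callout body abbreviated):

```markdown
<!-- Jekyll / kramdown (before) -->
{: .warning }
> **Important**: Callout body.

<!-- Docusaurus / MDX (after) -->
:::warning Important

Callout body.

:::
```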
diff --git a/check_h1_headings.py b/check_h1_headings.py
deleted file mode 100644
index d2e19ee..0000000
--- a/check_h1_headings.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python3
-"""
-Check for .md and .mdx files in website/docs that have frontmatter titles
-but are missing H1 headings.
-"""
-
-import os
-import re
-from pathlib import Path
-
-def extract_frontmatter_title(lines):
- """Extract title from frontmatter if it exists."""
- if not lines or not lines[0].strip().startswith('---'):
- return None
-
- in_frontmatter = False
- title = None
-
- for i, line in enumerate(lines):
- if i == 0 and line.strip() == '---':
- in_frontmatter = True
- continue
-
- if in_frontmatter:
- if line.strip() == '---':
- break
-
- # Check for title field
- title_match = re.match(r'^title:\s*(.+)$', line.strip())
- if title_match:
- title = title_match.group(1).strip()
- # Remove quotes if present
- title = title.strip('"').strip("'")
-
- return title
-
-def has_h1_heading(lines):
- """Check if content has an H1 heading after frontmatter."""
- in_frontmatter = False
- frontmatter_ended = False
-
- for i, line in enumerate(lines):
- if i == 0 and line.strip().startswith('---'):
- in_frontmatter = True
- continue
-
- if in_frontmatter and line.strip() == '---':
- in_frontmatter = False
- frontmatter_ended = True
- continue
-
- if frontmatter_ended or not in_frontmatter:
- # Check for H1 heading (line starting with "# ")
- if line.strip().startswith('# ') and len(line.strip()) > 2:
- return True
-
- return False
-
-def check_files(docs_dir):
- """Check all .md and .mdx files in docs directory."""
- missing_h1 = []
-
- for root, dirs, files in os.walk(docs_dir):
- for file in files:
- if file.endswith(('.md', '.mdx')):
- file_path = os.path.join(root, file)
- rel_path = os.path.relpath(file_path, docs_dir)
-
- try:
- with open(file_path, 'r', encoding='utf-8') as f:
- lines = f.readlines()
-
- title = extract_frontmatter_title(lines)
-
- # If there's a title in frontmatter but no H1 heading
- if title and not has_h1_heading(lines):
- missing_h1.append({
- 'path': rel_path,
- 'full_path': file_path,
- 'title': title
- })
-
- except Exception as e:
- print(f"Error reading {file_path}: {e}")
-
- return missing_h1
-
-if __name__ == '__main__':
- docs_dir = '/Users/danshalev/docs-staging/website/docs'
-
- if not os.path.exists(docs_dir):
- print(f"Directory not found: {docs_dir}")
- exit(1)
-
- missing_h1_files = check_files(docs_dir)
-
- # Sort by path
- missing_h1_files.sort(key=lambda x: x['path'])
-
- # Focus on index and main section pages
- index_files = [f for f in missing_h1_files if 'index' in f['path']]
- other_main_files = [f for f in missing_h1_files if 'index' not in f['path']]
-
- print("=" * 80)
- print("FILES MISSING H1 HEADINGS")
- print("=" * 80)
- print()
-
- if index_files:
- print("INDEX PAGES:")
- print("-" * 80)
- for file in index_files:
- print(f"File: {file['path']}")
- print(f" Title: {file['title']}")
- print(f" Suggested H1: # {file['title']}")
- print()
-
- if other_main_files:
- print("\nOTHER PAGES:")
- print("-" * 80)
- for file in other_main_files:
- print(f"File: {file['path']}")
- print(f" Title: {file['title']}")
- print(f" Suggested H1: # {file['title']}")
- print()
-
- print("\n" + "=" * 80)
- print(f"TOTAL FILES MISSING H1: {len(missing_h1_files)}")
- print(f" - Index pages: {len(index_files)}")
- print(f" - Other pages: {len(other_main_files)}")
- print("=" * 80)
diff --git a/check_navigation_complete.py b/check_navigation_complete.py
deleted file mode 100644
index 6dfcc8c..0000000
--- a/check_navigation_complete.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python3
-"""
-Final comprehensive navigation check - verifies the site has no navigation errors
-"""
-import re
-from pathlib import Path
-
-def main():
- workspace = Path('/Users/danshalev/docs-staging')
- sidebars_path = workspace / 'website' / 'sidebars.ts'
- docs_dir = workspace / 'website' / 'docs'
-
- with open(sidebars_path, 'r') as f:
- content = f.read()
-
- # Extract all doc references (not in id: fields which are for category links)
- # Match patterns like 'path/file' but not in "id: 'path/file'" context
- lines = content.split('\n')
- doc_refs = []
-
- for i, line in enumerate(lines):
- # Skip lines that are just category link definitions
- if 'id:' in line and i > 0 and 'link:' in lines[i-1]:
- continue
-
- # Match quoted doc paths
- matches = re.findall(r"['\"]([a-z0-9_-]+/[a-z0-9_/-]+)['\"]", line)
- for match in matches:
- if not any(x in match for x in ['type', 'label', 'doc', 'category']):
- doc_refs.append(match)
-
- # Match simple doc names at root level
- simple = re.findall(r"^\s*['\"]([a-z0-9_-]+)['\"],?\s*$", line)
- for s in simple:
- if s not in ['doc', 'category', 'shell']:
- doc_refs.append(s)
-
- print("=" * 80)
- print("COMPLETE NAVIGATION CHECK")
- print("=" * 80)
- print()
-
- # Check each reference
- missing = []
- found = []
-
- for doc_id in set(doc_refs):
- possible = [
- docs_dir / f"{doc_id}.md",
- docs_dir / f"{doc_id}.mdx",
- ]
-
- if any(p.exists() for p in possible):
- found.append(doc_id)
- else:
- missing.append(doc_id)
-
- print(f"✅ Found {len(found)} valid document references")
-
- if missing:
- print(f"\n❌ Missing {len(missing)} documents:")
- for m in sorted(missing):
- print(f" - {m}")
- return 1
-
- # Check build log
- build_log = Path('/tmp/full-build.log')
- if build_log.exists():
- with open(build_log) as f:
- log = f.read()
-
- if 'BUILD STATUS: SUCCESS' in log or 'Generated static files' in log:
- print("\n✅ Build completed successfully")
- else:
- print("\n❌ Build did not complete successfully")
- return 1
-
- # Check for critical errors only
- if re.search(r'Error:|ERROR:|Module not found|Cannot find', log, re.IGNORECASE):
- print("❌ Critical errors found in build")
- return 1
- else:
- print("✅ No critical errors in build")
-
- print("\n" + "=" * 80)
- print("✅ ✅ ✅ ALL NAVIGATION CHECKS PASSED ✅ ✅ ✅")
- print("=" * 80)
- print("\nSummary:")
- print(f" • {len(found)} pages correctly referenced in sidebar")
- print(f" • 0 missing page references")
- print(f" • Build completes successfully")
- print(f" • No critical errors")
- print("\nThe only warnings are broken anchor links (internal sections),")
- print("which are minor content issues, not navigation problems.")
- return 0
-
-if __name__ == '__main__':
- exit(main())
diff --git a/cloud/enterprise-tier.md b/cloud/enterprise-tier.md
index 6c37968..0140371 100644
--- a/cloud/enterprise-tier.md
+++ b/cloud/enterprise-tier.md
@@ -43,7 +43,7 @@ The Enterprise Tier is fully optimized for mission-critical applications, provid
## Getting Started
-
+
⚙️ To begin your Enterprise journey, schedule a consultation:
diff --git a/cloud/index.md b/cloud/index.md
index cfede3a..1f39ba4 100644
--- a/cloud/index.md
+++ b/cloud/index.md
@@ -13,11 +13,11 @@ Get started with FalkorDB's cloud offering. The platform provides several enterp
| Group | Features |
| :--- | :--- |
-| **Availability & Resilience** | - High Availability
- Multi-zone Deployment
- Multi-Graph / Multi-Tenancy
- Automated Backups
- Continuous Persistence |
-| **Security & Access** | - Graph Access Control
- TLS
- VPC Peering |
-| **Deployment & Scaling** | - Dedicated Cluster Deployment
- Scalability |
-| **Support & Monitoring** | - Dedicated Support
- Advanced Monitoring
- Dedicated Account Manager |
-| ☁️ **Cloud Providers** | - AWS
- GCP
- Azure (BYOC) |
+| **Availability & Resilience** | - High Availability
- Multi-zone Deployment
- Multi-Graph / Multi-Tenancy
- Automated Backups
- Continuous Persistence |
+| **Security & Access** | - Graph Access Control
- TLS
- VPC Peering |
+| **Deployment & Scaling** | - Dedicated Cluster Deployment
- Scalability |
+| **Support & Monitoring** | - Dedicated Support
- Advanced Monitoring
- Dedicated Account Manager |
+| ☁️ **Cloud Providers** | - AWS
- GCP
- Azure (BYOC) |
[](https://github.com/FalkorDB/docs/edit/Cloud-Docs/cloud/features.md)
diff --git a/cloud/pro-tier.md b/cloud/pro-tier.md
index e5db5c2..8260045 100644
--- a/cloud/pro-tier.md
+++ b/cloud/pro-tier.md
@@ -81,7 +81,7 @@ The Pro Tier provides a robust environment to scale your application with confid
## Getting Started
-
+
⚙️ Spin up your first FalkorDB Cloud instance:
diff --git a/cloud/startup-tier.md b/cloud/startup-tier.md
index c67f990..c2d8ddb 100644
--- a/cloud/startup-tier.md
+++ b/cloud/startup-tier.md
@@ -50,7 +50,7 @@ The Startup Tier includes essential features like **TLS** and **Automated Backup
## Getting Started
-
+
⚙️ Spin up your first FalkorDB Cloud instance:
diff --git a/commands/acl.md b/commands/acl.md
index 6cb39e8..b770c97 100644
--- a/commands/acl.md
+++ b/commands/acl.md
@@ -46,10 +46,10 @@ Usage: `ACL SETUSER [rule1] [rule2] ...`
* on / off: Enables or disables the user account.
* nopass: Allows access without a password.
- * password:: Sets a password for the user.
- * ~: Restricts access to graphs matching the given pattern.
- * +: Grants permission to execute specific commands.
- * -: Denies permission to execute specific commands.
+ * password:`<password>`: Sets a password for the user.
+ * ~`<pattern>`: Restricts access to graphs matching the given pattern.
+ * +`<command>`: Grants permission to execute specific commands.
+ * -`<command>`: Denies permission to execute specific commands.
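The placeholders in the rules above are wrapped in inline code, presumably because MDX parses bare angle brackets as JSX tags; the names `<password>`, `<pattern>`, and `<command>` are inferred from the rule descriptions rather than taken from the original file. A minimal sketch of the difference:

```markdown
<!-- Likely to fail the MDX compile: <pattern> is read as an unclosed JSX tag -->
* ~<pattern>: Restricts access to graphs matching the given pattern.

<!-- Builds cleanly: the placeholder is inline code -->
* ~`<pattern>`: Restricts access to graphs matching the given pattern.
```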
#### Example
diff --git a/commands/graph.constraint-create.md b/commands/graph.constraint-create.md
index 9ba921a..6f5c700 100644
--- a/commands/graph.constraint-create.md
+++ b/commands/graph.constraint-create.md
@@ -53,7 +53,7 @@ E.g., a graph can contain the following `Person` nodes:
But trying to create a third node with `first_name` Frank and `last_name` Costanza, will issue an error and the query will fail.
-Notes:
+:::note Notes
- A unique constraint requires the existence of an exact-match index prior to its creation. For example, trying to create a unique constraint governing attributes: `first_name` and `last_name` of nodes with label `Person` without having an exact-match index over `Person`'s `first_name` and `last_name` attributes will fail.
@@ -61,7 +61,7 @@ But trying to create a third node with `first_name` Frank and `last_name` Costan
- Unique constraints are not enforced for array-valued properties.
- Trying to delete an index that supports a constraint will fail.
-
+:::
## Creating a constraint
@@ -73,36 +73,42 @@ GRAPH.CONSTRAINT CREATE key constraintType {NODE label | RELATIONSHIP reltype} P
## Required arguments
-key
+
+key
is key name for the graph.
+
-constraintType
+
+constraintType
is the constraint type: either `MANDATORY` or `UNIQUE`.
-NODE label | RELATIONSHIP reltype
+
+NODE label | RELATIONSHIP reltype
is the graph entity type (`NODE` or `RELATIONSHIP`) and the name of the node label or relationship type on which the constraint should be enforced.
-propCount
+
+propCount
is the number of properties following. Valid values are between 1 and 255.
-prop...
+
+prop...
is a list of `propCount` property names.
-Notes:
+:::note Notes
- Constraints are created asynchronously. The constraint creation command will reply with `PENDING` and the newly created constraint is enforced gradually on all relevant nodes or relationships.
During its creation phase, a constraint's status is `UNDER CONSTRUCTION`. When all governed nodes or relationships confirm to the constraint - its status is updated to `OPERATIONAL`, otherwise, if a conflict is detected, the constraint status is updated to `FAILED` and the constraint is not enforced. The caller may try to resolve the conflict and recreate the constraint. To retrieve the status of all constraints - use the `db.constraints()` procedure.
@@ -115,7 +121,7 @@ is a list of `propCount` property names.
1. The graph contains data which violates the constraint
-
+:::
## Return value
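In this file the notes list is wrapped in a `:::note` container whose opening line and closing `:::` fall in separate hunks. A condensed sketch of the structure the page ends up with (list items abbreviated from the surrounding context):

```markdown
:::note Notes

- Constraints are created asynchronously; their status moves from `UNDER CONSTRUCTION` to `OPERATIONAL`, or to `FAILED` on conflict.
- Use the `db.constraints()` procedure to retrieve the status of all constraints.

:::
```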
diff --git a/commands/graph.constraint-drop.md b/commands/graph.constraint-drop.md
index 982375d..c8c3b59 100644
--- a/commands/graph.constraint-drop.md
+++ b/commands/graph.constraint-drop.md
@@ -22,30 +22,35 @@ For an introduction to constraints see [GRAPH.CONSTRAINT CREATE](/commands/graph
## Required arguments
-key
+
+key
is key name for the graph.
-constraintType
+
+constraintType
is the constraint type: either `MANDATORY` or `UNIQUE`.
-NODE label | RELATIONSHIP reltype
+
+NODE label | RELATIONSHIP reltype
is the graph entity type (`NODE` or `RELATIONSHIP`) and the name of the node label or relationship type on which the constraint is enforced.
-propCount
+
+propCount
is the number of properties following. Valid values are between 1 and 255.
-prop...
+
+prop...
is a list of `propCount` property names.
diff --git a/comprehensive_nav_check.py b/comprehensive_nav_check.py
deleted file mode 100644
index eb4b40c..0000000
--- a/comprehensive_nav_check.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python3
-"""
-Comprehensive navigation check - verifies:
-1. All pages in sidebars.ts exist
-2. No duplicate entries
-3. All markdown files are accessible
-4. Build succeeds
-"""
-import re
-from pathlib import Path
-from collections import Counter
-
-def extract_all_doc_references(sidebars_content):
- """Extract all doc ID references including nested ones"""
- doc_ids = []
-
- # Pattern for doc IDs in various contexts
- patterns = [
- r"id:\s*['\"]([^'\"]+)['\"]", # id: 'doc-id'
- r"['\"]([a-zA-Z0-9/_-]+/[a-zA-Z0-9/_-]+)['\"]", # 'path/to/doc'
- ]
-
- for pattern in patterns:
- matches = re.findall(pattern, sidebars_content)
- doc_ids.extend(matches)
-
- # Also catch standalone doc references (single words with /)
- standalone = re.findall(r"^\s*['\"]([a-z-]+/[a-z-]+(?:/[a-z-]+)*)['\"],?\s*$",
- sidebars_content, re.MULTILINE)
- doc_ids.extend(standalone)
-
- return doc_ids
-
-def main():
- workspace = Path('/Users/danshalev/docs-staging')
- sidebars_path = workspace / 'website' / 'sidebars.ts'
- docs_dir = workspace / 'website' / 'docs'
-
- print("=" * 80)
- print(" " * 20 + "COMPREHENSIVE NAVIGATION CHECK")
- print("=" * 80)
- print()
-
- # Read sidebars
- with open(sidebars_path, 'r') as f:
- sidebars_content = f.read()
-
- # Extract all doc references
- print("📋 Step 1: Extracting all doc references from sidebars.ts")
- doc_ids = extract_all_doc_references(sidebars_content)
- print(f" Found {len(doc_ids)} total references")
-
- # Check for duplicates
- print()
- print("🔍 Step 2: Checking for duplicate entries")
- counts = Counter(doc_ids)
- duplicates = {doc: count for doc, count in counts.items() if count > 1}
-
- if duplicates:
- print(f" ❌ Found {len(duplicates)} duplicate entries:")
- for doc, count in duplicates.items():
- print(f" - {doc} (appears {count} times)")
- else:
- print(" ✅ No duplicates found!")
-
- # Check if files exist
- print()
- print("🔍 Step 3: Verifying all referenced files exist")
- unique_docs = set(doc_ids)
- missing = []
- found = []
-
- for doc_id in unique_docs:
- possible_paths = [
- docs_dir / f"{doc_id}.md",
- docs_dir / f"{doc_id}.mdx",
- ]
-
- if any(p.exists() for p in possible_paths):
- found.append(doc_id)
- else:
- missing.append(doc_id)
-
- if missing:
- print(f" ❌ Missing {len(missing)} files:")
- for doc in sorted(missing):
- print(f" - {doc}")
- else:
- print(f" ✅ All {len(found)} referenced files exist!")
-
- # Check all markdown files
- print()
- print("🔍 Step 4: Finding all markdown files in docs/")
- all_md_files = set()
- for ext in ['*.md', '*.mdx']:
- for file in docs_dir.rglob(ext):
- rel_path = file.relative_to(docs_dir)
- doc_id = str(rel_path.with_suffix(''))
- all_md_files.add(doc_id)
-
- print(f" Found {len(all_md_files)} markdown files")
-
- # Check which files aren't in sidebar
- not_in_sidebar = all_md_files - unique_docs
- if not_in_sidebar:
- print(f" ⚠️ {len(not_in_sidebar)} files not referenced in sidebar:")
- for doc in sorted(not_in_sidebar):
- print(f" - {doc}")
- else:
- print(" ✅ All markdown files are in the sidebar!")
-
- # Final summary
- print()
- print("=" * 80)
- print(" " * 30 + "FINAL REPORT")
- print("=" * 80)
- print(f"📊 Sidebar references: {len(doc_ids)} total, {len(unique_docs)} unique")
- print(f"📄 Markdown files in docs/: {len(all_md_files)}")
- print(f"✅ Files correctly referenced: {len(found)}")
- print(f"❌ Missing files: {len(missing)}")
- print(f"🔄 Duplicate references: {len(duplicates)}")
- print(f"⚠️ Unreferenced files: {len(not_in_sidebar)}")
- print()
-
- # Overall status
- if missing or duplicates:
- print("🚨 CRITICAL ISSUES FOUND - Navigation has errors!")
- print()
- if missing:
- print(" Fix missing files by either:")
- print(" 1. Creating the missing files")
- print(" 2. Removing the references from sidebars.ts")
- if duplicates:
- print(" Fix duplicates by removing redundant entries from sidebars.ts")
- return 1
- elif not_in_sidebar:
- print("⚠️ WARNING - Some files exist but aren't in navigation")
- print(" These files won't be accessible to users unless added to sidebars.ts")
- return 0
- else:
- print("✅ ✅ ✅ PERFECT! All navigation verified successfully! ✅ ✅ ✅")
- print()
- print(" • All referenced files exist")
- print(" • No duplicate entries")
- print(" • All markdown files are in sidebar")
- return 0
-
-if __name__ == '__main__':
- exit(main())
diff --git a/cypher/functions.md b/cypher/functions.md
index 60a16c0..4cdae1c 100644
--- a/cypher/functions.md
+++ b/cypher/functions.md
@@ -29,7 +29,7 @@ This section contains information on all supported functions from the Cypher que
| [all(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds true for all elements in _list_ |
| [any(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds true for at least one element in _list_ |
| exists(_pattern_) | Returns true when at least one match for _pattern_ exists |
-| isEmpty(_list_|_map_|_string_) | Returns true if the input list or map contains no elements or if the input string contains no characters
Returns null when the input evaluates to null |
+| isEmpty(_list_|_map_|_string_) | Returns true if the input list or map contains no elements or if the input string contains no characters
Returns null when the input evaluates to null |
| [none(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds false for all elements in _list_ |
| [single(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds true for exactly one element in _list_ |
@@ -37,17 +37,17 @@ This section contains information on all supported functions from the Cypher que
| Function | Description|
| --------------------------------- | :----------|
-| coalesce(_expr_[, expr...]) | Returns the evaluation of the first argument that evaluates to a non-null value
Returns null when all arguments evaluate to null |
-| endNode(_relationship_) | Returns the destination node of a relationship
Returns null when _relationship_ evaluates to null |
-| hasLabels(_node_, _labelsList_) * | Returns true when _node_ contains all labels in _labelsList_, otherwise false
Return true when _labelsList_ evaluates to an empty list |
+| coalesce(_expr_[, expr...]) | Returns the evaluation of the first argument that evaluates to a non-null value
Returns null when all arguments evaluate to null |
+| endNode(_relationship_) | Returns the destination node of a relationship
Returns null when _relationship_ evaluates to null |
+| hasLabels(_node_, _labelsList_) * | Returns true when _node_ contains all labels in _labelsList_, otherwise false
Return true when _labelsList_ evaluates to an empty list |
| id(_node_|_relationship_) | Returns the internal ID of a node or relationship (which is not immutable) |
-| labels(_node_) | Returns a list of strings: all labels of _node_
Returns null when _node_ evaluates to null |
-| properties(_expr_) | When _expr_ is a node or relationship: Returns a map containing all the properties of the given node or relationship
When _expr_ evaluates to a map: Returns _expr_ unchanged
Returns null when _expr_ evaluates to null |
+| labels(_node_) | Returns a list of strings: all labels of _node_
Returns null when _node_ evaluates to null |
+| properties(_expr_) | When _expr_ is a node or relationship: Returns a map containing all the properties of the given node or relationship
When _expr_ evaluates to a map: Returns _expr_ unchanged
Returns null when _expr_ evaluates to null |
| randomUUID() | Returns a random UUID (Universal Unique IDentifier) |
-| startNode(_relationship_) | Returns the source node of a relationship
Returns null when _relationship_ evaluates to null |
+| startNode(_relationship_) | Returns the source node of a relationship
Returns null when _relationship_ evaluates to null |
| timestamp() | Returns the current system timestamp (milliseconds since epoch) |
-| type(_relationship_) | Returns a string: the type of _relationship_
Returns null when _relationship_ evaluates to null |
-| typeOf(_expr_) * | Returns a string: the type of a literal, an expression's evaluation, an alias, a node's property, or a relationship's property
Return value is one of `Map`, `String`, `Integer`, `Boolean`, `Float`, `Node`, `Edge`, `List`, `Path`, `Point`, or `Null` |
+| type(_relationship_) | Returns a string: the type of _relationship_
Returns null when _relationship_ evaluates to null |
+| typeOf(_expr_) * | Returns a string: the type of a literal, an expression's evaluation, an alias, a node's property, or a relationship's property
Return value is one of `Map`, `String`, `Integer`, `Boolean`, `Float`, `Node`, `Edge`, `List`, `Path`, `Point`, or `Null` |
| prev(_expr_) * | Stores the previous value and returns it on the next call; returns `null` on the first call. Useful for variable-length traversal filtering of edges based on the prior value. |
* FalkorDB-specific extensions to Cypher
@@ -56,32 +56,32 @@ This section contains information on all supported functions from the Cypher que
|Function | Description|
| ----------------------------------- |:-----------|
-|avg(_expr_) | Returns the average of a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
+|avg(_expr_) | Returns the average of a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
|collect(_expr_) | Returns a list containing all non-null elements which evaluated from a given expression |
-|count(_expr_|*) | When argument is _expr_: returns the number of non-null evaluations of _expr_
When argument is `*`: returns the total number of evaluations (including nulls) |
-|max(_expr_) | Returns the maximum value in a set of values (taking into account type ordering). null values are ignored
Returns null when _expr_ has no evaluations |
-|min(_expr_) | Returns the minimum value in a set of values (taking into account type ordering). null values are ignored
Returns null when _expr_ has no evaluations |
-|percentileCont(_expr_, _percentile_) | Returns a linear-interpolated percentile (between 0.0 and 1.0) over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
-|percentileDisc(_expr_, _percentile_) | Returns a nearest-value percentile (between 0.0 and 1.0) over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
-|stDev(_expr_) | Returns the sample standard deviation over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
-|stDevP(_expr_) | Returns the population standard deviation over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
-|sum(_expr_) | Returns the sum of a set of numeric values. null values are ignored
Returns 0 when _expr_ has no evaluations |
+|count(_expr_|*) | When argument is _expr_: returns the number of non-null evaluations of _expr_
When argument is `*`: returns the total number of evaluations (including nulls) |
+|max(_expr_) | Returns the maximum value in a set of values (taking into account type ordering). null values are ignored
Returns null when _expr_ has no evaluations |
+|min(_expr_) | Returns the minimum value in a set of values (taking into account type ordering). null values are ignored
Returns null when _expr_ has no evaluations |
+|percentileCont(_expr_, _percentile_) | Returns a linear-interpolated percentile (between 0.0 and 1.0) over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
+|percentileDisc(_expr_, _percentile_) | Returns a nearest-value percentile (between 0.0 and 1.0) over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
+|stDev(_expr_) | Returns the sample standard deviation over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
+|stDevP(_expr_) | Returns the population standard deviation over a set of numeric values. null values are ignored
Returns null when _expr_ has no evaluations |
+|sum(_expr_) | Returns the sum of a set of numeric values. null values are ignored
Returns 0 when _expr_ has no evaluations |
## List functions
| Function | Description|
| ------------------------------------ | :----------|
-| head(_expr_) | Returns the first element of a list
Returns null when _expr_ evaluates to null or an empty list |
-| keys(_expr_) | Returns a list of strings: all key names for given map or all property names for a given node or edge
Returns null when _expr_ evaluates to null |
-| last(_expr_) | Returns the last element of a list
Returns null when _expr_ evaluates to null or an empty list
-| list.dedup(_list_) * | Given a list, returns a similar list after removing duplicate elements
Order is preserved, duplicates are removed from the end of the list
Returns null when _list_ evaluates to null
Emit an error when _list_ does not evaluate to a list or to null |
-| list.insert(_list_, _idx_, _val_[, _dups_ = TRUE]) * | Given a list, returns a list after inserting a given value at a given index
_idx_ is 0-based when non-negative, or from the end of the list when negative
Returns null when _list_ evaluates to null
Returns _list_ when _val_ evaluates to null
Returns _list_ when _idx_ evaluates to an integer not in [-NumItems-1 .. NumItems]
When _dups_ evaluates to FALSE: returns _list_ when _val_ evaluates to a value that is already an element of _list_
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _idx_ does not evaluate to an integer
Emit an error when _dups_, if specified, does not evaluate to a Boolean |
-| list.insertListElements(_list_, _list2_, _idx_[, _dups_ = TRUE]) * | Given a list, returns a list after inserting the elements of a second list at a given index
_idx_ is 0-based when non-negative, or from the end of the list when negative
Returns null when _list_ evaluates to null
Returns _list_ when _list2_ evaluates to null
Returns _list_ when _idx_ evaluates to an integer not in [-NumItems-1 .. NumItems]
When _dups_ evaluates to FALSE: If an element of _list2_ evaluates to an element of _list_ it would be skipped; If multiple elements of _list2_ evaluate to the same value - this value would be inserted at most once to _list_
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _list2_ does not evaluate to a list or to null
Emit an error when _idx_ does not evaluate to an integer
Emit an error when _dups_, if specified, does not evaluate to a Boolean |
-| list.remove(_list_, _idx_[, _count_ = 1]) * | Given a list, returns a list after removing a given number of consecutive elements (or less, if the end of the list has been reached). starting at a given index.
_idx_ is 0-based when non-negative, or from the end of the list when negative
Returns _null_ when _list_ evaluates to null
Returns _list_ when _idx_ evaluates to an integer not in [-NumItems .. NumItems-1]
Returns _list_ when _count_ evaluates to a non-positive integer
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _idx_ does not evaluate to an integer
Emit an error when _count_, if specified, does not evaluate to an integer |
-| list.sort(_list_[, _ascending_ = TRUE]) * | Given a list, returns a list with similar elements, but sorted (inversely-sorted if _ascending_ is evaluated to FALSE)
Returns null when _list_ evaluates to null
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _ascending_, if specified, does not evaluate to a Boolean |
+| head(_expr_) | Returns the first element of a list
Returns null when _expr_ evaluates to null or an empty list |
+| keys(_expr_) | Returns a list of strings: all key names for given map or all property names for a given node or edge
Returns null when _expr_ evaluates to null |
+| last(_expr_) | Returns the last element of a list
Returns null when _expr_ evaluates to null or an empty list
+| list.dedup(_list_) * | Given a list, returns a similar list after removing duplicate elements
Order is preserved, duplicates are removed from the end of the list
Returns null when _list_ evaluates to null
Emit an error when _list_ does not evaluate to a list or to null |
+| list.insert(_list_, _idx_, _val_[, _dups_ = TRUE]) * | Given a list, returns a list after inserting a given value at a given index
_idx_ is 0-based when non-negative, or from the end of the list when negative
Returns null when _list_ evaluates to null
Returns _list_ when _val_ evaluates to null
Returns _list_ when _idx_ evaluates to an integer not in [-NumItems-1 .. NumItems]
When _dups_ evaluates to FALSE: returns _list_ when _val_ evaluates to a value that is already an element of _list_
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _idx_ does not evaluate to an integer
Emit an error when _dups_, if specified, does not evaluate to a Boolean |
+| list.insertListElements(_list_, _list2_, _idx_[, _dups_ = TRUE]) * | Given a list, returns a list after inserting the elements of a second list at a given index
_idx_ is 0-based when non-negative, or from the end of the list when negative
Returns null when _list_ evaluates to null
Returns _list_ when _list2_ evaluates to null
Returns _list_ when _idx_ evaluates to an integer not in [-NumItems-1 .. NumItems]
When _dups_ evaluates to FALSE: If an element of _list2_ evaluates to an element of _list_ it would be skipped; If multiple elements of _list2_ evaluate to the same value - this value would be inserted at most once to _list_
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _list2_ does not evaluate to a list or to null
Emit an error when _idx_ does not evaluate to an integer
Emit an error when _dups_, if specified, does not evaluate to a Boolean |
+| list.remove(_list_, _idx_[, _count_ = 1]) * | Given a list, returns a list after removing a given number of consecutive elements (or fewer, if the end of the list has been reached), starting at a given index.
_idx_ is 0-based when non-negative, or from the end of the list when negative
Returns _null_ when _list_ evaluates to null
Returns _list_ when _idx_ evaluates to an integer not in [-NumItems .. NumItems-1]
Returns _list_ when _count_ evaluates to a non-positive integer
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _idx_ does not evaluate to an integer
Emit an error when _count_, if specified, does not evaluate to an integer |
+| list.sort(_list_[, _ascending_ = TRUE]) * | Given a list, returns a list with similar elements, but sorted (inversely-sorted if _ascending_ is evaluated to FALSE)
Returns null when _list_ evaluates to null
Emit an error when _list_ does not evaluate to a list or to null
Emit an error when _ascending_, if specified, does not evaluate to a Boolean |
| range(_first_, _last_[, _step_ = 1]) | Returns a list of integers in the range of [start, end]. _step_, an optional integer argument, is the increment between consecutive elements |
-| size(_expr_) | Returns the number of elements in a list
Returns null with _expr_ evaluates to null |
-| tail(_expr_) | Returns a sublist of a list, which contains all its elements except the first
Returns an empty list when _expr_ contains less than 2 elements.
Returns null when _expr_ evaluates to null |
+| size(_expr_) | Returns the number of elements in a list
Returns null when _expr_ evaluates to null |
+| tail(_expr_) | Returns a sublist of a list, which contains all its elements except the first
Returns an empty list when _expr_ contains less than 2 elements.
Returns null when _expr_ evaluates to null |
| [reduce(...)](#reduce) | Returns a scalar produced by evaluating an expression against each list member |
* FalkorDB-specific extensions to Cypher
@@ -101,18 +101,18 @@ This section contains information on all supported functions from the Cypher que
|Function | Description|
| ------------------------- |:-----------|
-| abs(_expr_) | Returns the absolute value of a numeric value
Returns null when _expr_ evaluates to null |
-| ceil(_expr_) ** | When _expr_ evaluates to an integer: returns its evaluation
When _expr_ evaluates to floating point: returns a floating point equals to the smallest integer greater than or equal to _expr_
Returns null when _expr_ evaluates to null |
+| abs(_expr_) | Returns the absolute value of a numeric value
Returns null when _expr_ evaluates to null |
+| ceil(_expr_) ** | When _expr_ evaluates to an integer: returns its evaluation
When _expr_ evaluates to a floating point: returns a floating point equal to the smallest integer greater than or equal to _expr_
Returns null when _expr_ evaluates to null |
| e() | Returns the constant _e_, the base of the natural logarithm |
-| exp(_expr_) | Returns _e_^_expr_, where _e_ is the base of the natural logarithm
Returns null when _expr_ evaluates to null |
-| floor(_expr_) ** | When _expr_ evaluates to an integer: returns its evaluation
When _expr_ evaluates to a floating point: returns a floating point equals to the greatest integer less than or equal to _expr_
Returns null when _expr_ evaluates to null |
-| log(_expr_) | Returns the natural logarithm of a numeric value
Returns nan when _expr_ evaluates to a negative numeric value, -inf when _expr_ evaluates to 0, and null when _expr_ evaluates to null |
-| log10(_expr_) | Returns the base-10 logarithm of a numeric value
Returns nan when _expr_ evaluates to a negative numeric value, -inf when _expr_ evaluates to 0, and null when _expr_ evaluates to null |
-| pow(_base_, _exponent_) * | Returns _base_ raised to the power of _exponent_ (equivalent to _base_^_exponent_)
Returns null when either evaluates to null |
+| exp(_expr_) | Returns _e_^_expr_, where _e_ is the base of the natural logarithm
Returns null when _expr_ evaluates to null |
+| floor(_expr_) ** | When _expr_ evaluates to an integer: returns its evaluation
When _expr_ evaluates to a floating point: returns a floating point equal to the greatest integer less than or equal to _expr_
Returns null when _expr_ evaluates to null |
+| log(_expr_) | Returns the natural logarithm of a numeric value
Returns nan when _expr_ evaluates to a negative numeric value, -inf when _expr_ evaluates to 0, and null when _expr_ evaluates to null |
+| log10(_expr_) | Returns the base-10 logarithm of a numeric value
Returns nan when _expr_ evaluates to a negative numeric value, -inf when _expr_ evaluates to 0, and null when _expr_ evaluates to null |
+| pow(_base_, _exponent_) * | Returns _base_ raised to the power of _exponent_ (equivalent to _base_^_exponent_)
Returns null when either evaluates to null |
| rand() | Returns a random floating point in the range [0,1] |
-| round(_expr_) ** *** | When _expr_ evaluates to an integer: returns its evaluation
When _expr_ evaluates to a floating point: returns a floating point equals to the integer closest to _expr_
Returns null when _expr_ evaluates to null |
-| sign(_expr_) | Returns the signum of a numeric value: 0 when _expr_ evaluates to 0, -1 when _expr_ evaluates to a negative numeric value, and 1 when _expr_ evaluates to a positive numeric value
Returns null when _expr_ evaluates to null |
-| sqrt(_expr_) | Returns the square root of a numeric value
Returns nan when _expr_ evaluates to a negative value and null when _expr_ evaluates to null |
+| round(_expr_) ** *** | When _expr_ evaluates to an integer: returns its evaluation
When _expr_ evaluates to a floating point: returns a floating point equal to the integer closest to _expr_
Returns null when _expr_ evaluates to null |
+| sign(_expr_) | Returns the signum of a numeric value: 0 when _expr_ evaluates to 0, -1 when _expr_ evaluates to a negative numeric value, and 1 when _expr_ evaluates to a positive numeric value
Returns null when _expr_ evaluates to null |
+| sqrt(_expr_) | Returns the square root of a numeric value
Returns nan when _expr_ evaluates to a negative value and null when _expr_ evaluates to null |
* FalkorDB-specific extensions to Cypher
@@ -124,64 +124,64 @@ This section contains information on all supported functions from the Cypher que
|Function | Description|
| --------------------- |:-----------|
-| acos(_expr_) | Returns the arccosine, in radians, of a numeric value
Returns nan when _expr_ evaluates to a numeric value not in [-1, 1] and null when _expr_ evaluates to null |
-| asin(_expr_) | Returns the arcsine, in radians, of a numeric value
Returns nan when _expr_ evaluates to a numeric value not in [-1, 1] and null when _expr_ evaluates to null |
-| atan(_expr_) | Returns the arctangent, in radians, of a numeric value
Returns null when _expr_ evaluates to null |
-| atan2(_expr_, _expr_) | Returns the 2-argument arctangent, in radians, of a pair of numeric values (Cartesian coordinates)
Returns 0 when both expressions evaluate to 0
Returns null when either expression evaluates to null |
-| cos(_expr_) | Returns the cosine of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
-| cot(_expr_) | Returns the cotangent of a numeric value that represents an angle in radians
Returns inf when _expr_ evaluates to 0 and null when _expr_ evaluates to null |
-| degrees(_expr_) | Converts a numeric value from radians to degrees
Returns null when _expr_ evaluates to null |
-| haversin(_expr_) | Returns half the versine of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
+| acos(_expr_) | Returns the arccosine, in radians, of a numeric value
Returns nan when _expr_ evaluates to a numeric value not in [-1, 1] and null when _expr_ evaluates to null |
+| asin(_expr_) | Returns the arcsine, in radians, of a numeric value
Returns nan when _expr_ evaluates to a numeric value not in [-1, 1] and null when _expr_ evaluates to null |
+| atan(_expr_) | Returns the arctangent, in radians, of a numeric value
Returns null when _expr_ evaluates to null |
+| atan2(_expr_, _expr_) | Returns the 2-argument arctangent, in radians, of a pair of numeric values (Cartesian coordinates)
Returns 0 when both expressions evaluate to 0
Returns null when either expression evaluates to null |
+| cos(_expr_) | Returns the cosine of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
+| cot(_expr_) | Returns the cotangent of a numeric value that represents an angle in radians
Returns inf when _expr_ evaluates to 0 and null when _expr_ evaluates to null |
+| degrees(_expr_) | Converts a numeric value from radians to degrees
Returns null when _expr_ evaluates to null |
+| haversin(_expr_) | Returns half the versine of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
| pi() | Returns the mathematical constant _pi_ |
-| radians(_expr_) | Converts a numeric value from degrees to radians
Returns null when _expr_ evaluates to null |
-| sin(_expr_) | Returns the sine of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
-| tan(_expr_) | Returns the tangent of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
+| radians(_expr_) | Converts a numeric value from degrees to radians
Returns null when _expr_ evaluates to null |
+| sin(_expr_) | Returns the sine of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
+| tan(_expr_) | Returns the tangent of a numeric value that represents an angle in radians
Returns null when _expr_ evaluates to null |
## String functions
| Function | Description|
| ----------------------------------- | :----------|
-| left(_str_, _len_) | Returns a string containing the _len_ leftmost characters of _str_
Returns null when _str_ evaluates to null, otherwise emit an error if _len_ evaluates to null |
-| lTrim(_str_) | Returns _str_ with leading whitespace removed
Returns null when _str_ evaluates to null |
-| replace(_str_, _search_, _replace_) | Returns _str_ with all occurrences of _search_ replaced with _replace_
Returns null when any argument evaluates to null |
-| reverse(_str_) | Returns a string in which the order of all characters in _str_ are reversed
Returns null when _str_ evaluates to null |
-| right(_str_, _len_) | Returns a string containing the _len_ rightmost characters of _str_
Returns null when _str_ evaluates to null, otherwise emit an error if _len_ evaluates to null |
-| rTrim(_str_) | Returns _str_ with trailing whitespace removed
Returns null when _str_ evaluates to null |
-| split(_str_, _delimiter_) | Returns a list of strings from splitting _str_ by _delimiter_
Returns null when any argument evaluates to null |
-| string.join(_strList_[, _delimiter_ = '']) * | Returns a concatenation of a list of strings using a given delimiter
Returns null when _strList_ evaluates to null
Returns null when _delimiter_, if specified, evaluates to null
Emit an error when _strList_ does not evaluate to a list or to null
Emit an error when an element of _strList_ does not evaluate to a string
Emit an error when _delimiter_, if specified, does not evaluate to a string or to null |
-| string.matchRegEx(_str_, _regex_) * | Given a string and a regular expression, returns a list of all matches and matching regions
Returns an empty list when _str_ evaluates to null
Returns an empty list when _regex_ evaluates to null
Emit an error when _str_ does not evaluate to a string or to null
Emit an error when _regex_ does not evaluate to a valid regex string or to null |
-| string.replaceRegEx(_str_, _regex_, _replacement_) * | Given a string and a regular expression, returns a string after replacing each regex match with a given replacement
Returns null when _str_ evaluates to null
Returns null when _regex_ evaluates to null
Returns null when _replacement_ evaluates to null
Emit an error when _str_ does not evaluate to a string or to null
Emit an error when _regex_ does not evaluate to a valid regex string or to null
Emit an error when _replacement_ does not evaluate to a string or to null |
-| substring(_str_, _start_[, _len_]) | When _len_ is specified: returns a substring of _str_ beginning with a 0-based index _start_ and with length _len_
When _len_ is not specified: returns a substring of _str_ beginning with a 0-based index _start_ and extending to the end of _str_
Returns null when _str_ evaluates to null
Emit an error when _start_ or _len_ evaluate to null |
-| toLower(_str_) | Returns _str_ in lowercase
Returns null when _str_ evaluates to null |
-| toJSON(_expr_) * | Returns a [JSON representation](#json-format) of a value
Returns null when _expr_ evaluates to null |
-| toUpper(_str_) | Returns _str_ in uppercase
Returns null when _str_ evaluates to null |
-| trim(_str_) | Returns _str_ with leading and trailing whitespace removed
Returns null when _str_ evaluates to null |
-| size(_str_) | Returns the number of characters in _str_
Returns null when _str_ evaluates to null |
-| [intern(_str_)](#intern) | Returns a deduplicated, memory-efficient representation of _str_
Returns null when _str_ evaluates to null |
+| left(_str_, _len_) | Returns a string containing the _len_ leftmost characters of _str_
Returns null when _str_ evaluates to null, otherwise emit an error if _len_ evaluates to null |
+| lTrim(_str_) | Returns _str_ with leading whitespace removed
Returns null when _str_ evaluates to null |
+| replace(_str_, _search_, _replace_) | Returns _str_ with all occurrences of _search_ replaced with _replace_
Returns null when any argument evaluates to null |
+| reverse(_str_) | Returns a string in which the order of all characters in _str_ is reversed
Returns null when _str_ evaluates to null |
+| right(_str_, _len_) | Returns a string containing the _len_ rightmost characters of _str_
Returns null when _str_ evaluates to null, otherwise emit an error if _len_ evaluates to null |
+| rTrim(_str_) | Returns _str_ with trailing whitespace removed
Returns null when _str_ evaluates to null |
+| split(_str_, _delimiter_) | Returns a list of strings from splitting _str_ by _delimiter_
Returns null when any argument evaluates to null |
+| string.join(_strList_[, _delimiter_ = '']) * | Returns a concatenation of a list of strings using a given delimiter
Returns null when _strList_ evaluates to null
Returns null when _delimiter_, if specified, evaluates to null
Emit an error when _strList_ does not evaluate to a list or to null
Emit an error when an element of _strList_ does not evaluate to a string
Emit an error when _delimiter_, if specified, does not evaluate to a string or to null |
+| string.matchRegEx(_str_, _regex_) * | Given a string and a regular expression, returns a list of all matches and matching regions
Returns an empty list when _str_ evaluates to null
Returns an empty list when _regex_ evaluates to null
Emit an error when _str_ does not evaluate to a string or to null
Emit an error when _regex_ does not evaluate to a valid regex string or to null |
+| string.replaceRegEx(_str_, _regex_, _replacement_) * | Given a string and a regular expression, returns a string after replacing each regex match with a given replacement
Returns null when _str_ evaluates to null
Returns null when _regex_ evaluates to null
Returns null when _replacement_ evaluates to null
Emit an error when _str_ does not evaluate to a string or to null
Emit an error when _regex_ does not evaluate to a valid regex string or to null
Emit an error when _replacement_ does not evaluate to a string or to null |
+| substring(_str_, _start_[, _len_]) | When _len_ is specified: returns a substring of _str_ beginning with a 0-based index _start_ and with length _len_
When _len_ is not specified: returns a substring of _str_ beginning with a 0-based index _start_ and extending to the end of _str_
Returns null when _str_ evaluates to null
Emit an error when _start_ or _len_ evaluate to null |
+| toLower(_str_) | Returns _str_ in lowercase
Returns null when _str_ evaluates to null |
+| toJSON(_expr_) * | Returns a [JSON representation](#json-format) of a value
Returns null when _expr_ evaluates to null |
+| toUpper(_str_) | Returns _str_ in uppercase
Returns null when _str_ evaluates to null |
+| trim(_str_) | Returns _str_ with leading and trailing whitespace removed
Returns null when _str_ evaluates to null |
+| size(_str_) | Returns the number of characters in _str_
Returns null when _str_ evaluates to null |
+| [intern(_str_)](#intern) | Returns a deduplicated, memory-efficient representation of _str_
Returns null when _str_ evaluates to null |
## Point functions
| Function | Description|
| ---------------------------- | :----------|
| [point(_map_)](#point) | Returns a Point representing a lat/lon coordinates |
-| distance(_point1_, _point2_) | Returns the distance in meters between the two given points
Returns null when either evaluates to null |
+| distance(_point1_, _point2_) | Returns the distance in meters between the two given points
Returns null when either evaluates to null |
## Type conversion functions
|Function | Description|
| --------------------------- |:-----------|
-| toBoolean(_expr_) | Returns a Boolean when _expr_ evaluates to a Boolean
Converts a string to Boolean (`"true"` (case insensitive) to true, `"false"` (case insensitive) to false, any other value to null)
Converts an integer to Boolean (0 to `false`, any other values to `true`)
Returns null when _expr_ evaluates to null
Emit an error on other types |
+| toBoolean(_expr_) | Returns a Boolean when _expr_ evaluates to a Boolean
Converts a string to Boolean (`"true"` (case insensitive) to true, `"false"` (case insensitive) to false, any other value to null)
Converts an integer to Boolean (0 to `false`, any other values to `true`)
Returns null when _expr_ evaluates to null
Emit an error on other types |
| toBooleanList(_exprList_) | Converts a list to a list of Booleans. Each element in the list is converted using toBooleanOrNull() |
-| toBooleanOrNull(_expr_) | Returns a Boolean when _expr_ evaluates to a Boolean
Converts a string to Boolean (`"true"` (case insensitive) to true, `"false"` (case insensitive) to false, any other value to null)
Converts an integer to Boolean (0 to `false`, any other values to `true`)
Returns null when _expr_ evaluates to null
Returns null for other types |
-| toFloat(_expr_) | Returns a floating point when _expr_ evaluates to a floating point
Converts an integer to a floating point
Converts a string to a floating point or null
Returns null when _expr_ evaluates to null
Emit an error on other types |
+| toBooleanOrNull(_expr_) | Returns a Boolean when _expr_ evaluates to a Boolean
Converts a string to Boolean (`"true"` (case insensitive) to true, `"false"` (case insensitive) to false, any other value to null)
Converts an integer to Boolean (0 to `false`, any other values to `true`)
Returns null when _expr_ evaluates to null
Returns null for other types |
+| toFloat(_expr_) | Returns a floating point when _expr_ evaluates to a floating point
Converts an integer to a floating point
Converts a string to a floating point or null
Returns null when _expr_ evaluates to null
Emit an error on other types |
| toFloatList(_exprList_) | Converts a list to a list of floating points. Each element in the list is converted using toFloatOrNull() |
-| toFloatOrNull(_expr_) | Returns a floating point when _expr_ evaluates to a floating point
Converts an integer to a floating point
Converts a string to a floating point or null
Returns null when _expr_ evaluates to null
Returns null for other types |
-| toInteger(_expr_) * | Returns an integer when _expr_ evaluates to an integer
Converts a floating point to integer
Converts a string to an integer or null
Converts a Boolean to an integer (false to 0, true to 1) Returns null when _expr_ evaluates to null
Emit an error on other types |
+| toFloatOrNull(_expr_) | Returns a floating point when _expr_ evaluates to a floating point
Converts an integer to a floating point
Converts a string to a floating point or null
Returns null when _expr_ evaluates to null
Returns null for other types |
+| toInteger(_expr_) * | Returns an integer when _expr_ evaluates to an integer
Converts a floating point to integer
Converts a string to an integer or null
Converts a Boolean to an integer (false to 0, true to 1) Returns null when _expr_ evaluates to null
Emit an error on other types |
| toIntegerList(_exprList_) * | Converts a list to a list of integer values. Each element in the list is converted using toIntegerOrNull() |
-| toIntegerOrNull(_expr_) * | Returns an integer when _expr_ evaluates to an integer
Converts a floating point to integer
Converts a string to an integer or null
Converts a Boolean to an integer (false to 0, true to 1) Returns null when _expr_ evaluates to null
Returns null for other types |
-| toString(_expr_) | Returns a string when _expr_ evaluates to a string
Converts an integer, float, Boolean, string, or point to a string representation
Returns null when _expr_ evaluates to null
Emit an error on other types |
+| toIntegerOrNull(_expr_) * | Returns an integer when _expr_ evaluates to an integer
Converts a floating point to integer
Converts a string to an integer or null
Converts a Boolean to an integer (false to 0, true to 1) Returns null when _expr_ evaluates to null
Returns null for other types |
+| toString(_expr_) | Returns a string when _expr_ evaluates to a string
Converts an integer, float, Boolean, string, or point to a string representation
Returns null when _expr_ evaluates to null
Emit an error on other types |
| toStringList(_exprList_) | Converts a list to a list of strings. Each element in the list is converted using toStringOrNull() |
-| toStringOrNull(_expr_) | Returns a string when _expr_ evaluates to a string
Converts an integer, float, Boolean, string, or point to a string representation
Returns null when _expr_ evaluates to null
Returns null for other types |
+| toStringOrNull(_expr_) | Returns a string when _expr_ evaluates to a string
Converts an integer, float, Boolean, string, or point to a string representation
Returns null when _expr_ evaluates to null
Returns null for other types |
* FalkorDB-specific behavior: rounding method when converting a floating point to an integer is "toward negative infinity (floor)"
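As a quick sanity check of the conversion rules above, here is a minimal Cypher sketch. The values in the comments are the results expected from the table's rules (including the floor rounding noted above), not output captured from a live server:

```cypher
RETURN
  toInteger(1.9)           AS floored,    // 1  (rounds toward negative infinity)
  toInteger(-1.2)          AS negFloored, // -2 (floor, not truncation toward zero)
  toIntegerOrNull('hello') AS badString,  // null: the string is not a valid number
  toFloatOrNull(true)      AS wrongType   // null rather than an error; toFloat(true) would error
```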
@@ -189,8 +189,8 @@ This section contains information on all supported functions from the Cypher que
|Function | Description|
| ------------ |:-----------|
-|indegree(_node_ [, _reltype_ ...]) *
indegree(_node_ [, _reltypeList_]) * | When no relationship types are specified: Returns the number of _node_'s incoming edges
When one or more relationship types are specified: Returns the number of _node's_ incoming edges with one of the given relationship types
Return null when _node_ evaluates to null |
-|outdegree(_node_ [, _reltype_ ...]) *
outdegree(_node_ [, _reltypeList_]) * | When no relationship types are specified: Returns the number of _node_'s outgoing edges
When one or more relationship types are specified: Returns the number of _node's_ outgoing edges with one of the given relationship types
Return null when _node_ evaluates to null |
+| indegree(_node_ [, _reltype_ ...]) *
indegree(_node_ [, _reltypeList_]) * | When no relationship types are specified: Returns the number of _node_'s incoming edges
When one or more relationship types are specified: Returns the number of _node_'s incoming edges with one of the given relationship types
Returns null when _node_ evaluates to null |
+| outdegree(_node_ [, _reltype_ ...]) *
outdegree(_node_ [, _reltypeList_]) * | When no relationship types are specified: Returns the number of _node_'s outgoing edges
When one or more relationship types are specified: Returns the number of _node_'s outgoing edges with one of the given relationship types
Returns null when _node_ evaluates to null |
* FalkorDB-specific extensions to Cypher
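The degree functions are easiest to read against a concrete pattern. A minimal sketch follows; the `Person` label, `name` property, and `FOLLOWS`/`LIKES` relationship types are hypothetical placeholders, not part of any fixed schema:

```cypher
MATCH (p:Person {name: 'Alice'})
RETURN indegree(p)                        AS allIncoming, // every incoming edge, any type
       indegree(p, 'FOLLOWS')             AS followers,   // only :FOLLOWS edges
       outdegree(p, ['FOLLOWS', 'LIKES']) AS outgoing     // list form: edges of either type
```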
@@ -198,9 +198,9 @@ This section contains information on all supported functions from the Cypher que
| Function | Description|
| ----------------------------------------------| :----------|
-| nodes(_path_) | Returns a list containing all the nodes in _path_
Returns null if _path_ evaluates to null |
-| relationships(_path_) | Returns a list containing all the relationships in _path_
Returns null if _path_ evaluates to null |
-| length(_path_) | Return the length (number of edges) of _path_
Returns null if _path_ evaluates to null |
+| nodes(_path_) | Returns a list containing all the nodes in _path_
Returns null if _path_ evaluates to null |
+| relationships(_path_) | Returns a list containing all the relationships in _path_
Returns null if _path_ evaluates to null |
+| length(_path_) | Returns the length (number of edges) of _path_
Returns null if _path_ evaluates to null |
| [shortestPath(...)](#about-path-functions) * | Return the shortest path that resolves the given pattern |
| [allShortestPaths(...)](#about-path-functions) * | Returns all the shortest paths between a pair of entities
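A short illustration of the path accessors; the `City` label and `ROAD` relationship type are hypothetical, and the variable-length bound is only there to keep the match small. The `shortestPath()`/`allShortestPaths()` forms are covered by the section linked above rather than repeated here:

```cypher
MATCH p = (a:City {name: 'Berlin'})-[:ROAD*..3]->(b:City {name: 'Paris'})
RETURN length(p)        AS hops,    // number of edges in the path
       nodes(p)         AS cities,  // every node along the path, in order
       relationships(p) AS roads    // every edge along the path, in order
LIMIT 1
```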
@@ -211,7 +211,7 @@ This section contains information on all supported functions from the Cypher que
| Function | Description|
| ----------------------------------------- | :----------|
-| vecf32(_array_) | Creates a new float 32 vector
all elements of input array must be of type float |
+| vecf32(_array_) | Creates a new 32-bit float vector
All elements of the input array must be of type float |
| vec.euclideanDistance(_vector_, _vector_) | Returns the Euclidean distance between the two input vectors |
| vec.cosineDistance(_vector_, _vector_) | Returns the Cosine distance between the two input vectors |
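A minimal sketch of the vector helpers above; the literal arrays are arbitrary, and every element is written as a float because `vecf32()` requires float-typed elements:

```cypher
RETURN vec.euclideanDistance(vecf32([0.1, 0.2, 0.3]),
                             vecf32([0.1, 0.2, 0.9])) AS l2,
       vec.cosineDistance(vecf32([1.0, 0.0]),
                          vecf32([0.0, 1.0]))         AS cosine  // 1.0 for orthogonal vectors
```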
diff --git a/final_results.txt b/final_results.txt
deleted file mode 100644
index 91719a2..0000000
--- a/final_results.txt
+++ /dev/null
@@ -1,504 +0,0 @@
-Found 125 files with duplicate H1 headings (frontmatter title + markdown H1):
-
-```
-File: website/docs/References.md
-Line: 9
-Heading: # References
-
-File: website/docs/agentic-memory/cognee.md
-Line: 9
-Heading: # Cognee
-
-File: website/docs/agentic-memory/graphiti.md
-Line: 9
-Heading: # Graphiti
-
-File: website/docs/agentic-memory/index.md
-Line: 9
-Heading: # Agentic Memory
-
-File: website/docs/algorithms/betweenness-centrality.md
-Line: 8
-Heading: # Betweenness Centrality
-
-File: website/docs/algorithms/bfs.md
-Line: 8
-Heading: # BFS
-
-File: website/docs/algorithms/cdlp.md
-Line: 8
-Heading: # Community Detection using Label Propagation (CDLP)
-
-File: website/docs/algorithms/index.md
-Line: 9
-Heading: # FalkorDB Algorithms Overview
-
-File: website/docs/algorithms/msf.md
-Line: 9
-Heading: # Minimum Spanning Forest (MSF)
-
-File: website/docs/algorithms/pagerank.md
-Line: 8
-Heading: # PageRank
-
-File: website/docs/algorithms/sppath.md
-Line: 8
-Heading: # `algo.SPpaths` - Shortest Path (Single Pair)
-
-File: website/docs/algorithms/sspath.md
-Line: 8
-Heading: # `algo.SSpaths` - Single Source Paths
-
-File: website/docs/algorithms/wcc.md
-Line: 8
-Heading: # Weakly Connected Components (WCC)
-
-File: website/docs/cloud/features.md
-Line: 9
-Heading: # Features
-
-File: website/docs/cloud/index.md
-Line: 11
-Heading: # FalkorDB Cloud DBaaS
-
-File: website/docs/commands/acl.md
-Line: 9
-Heading: # ACL
-
-File: website/docs/commands/graph.info.md
-Line: 8
-Heading: # GRAPH.INFO
-
-File: website/docs/commands/index.md
-Line: 9
-Heading: # Commands
-
-File: website/docs/cypher/call.md
-Line: 9
-Heading: # CALL \{\}
-
-File: website/docs/cypher/create.md
-Line: 9
-Heading: # CREATE
-
-File: website/docs/cypher/cypher-support.md
-Line: 9
-Heading: # Cypher coverage
-
-File: website/docs/cypher/delete.md
-Line: 9
-Heading: # DELETE
-
-File: website/docs/cypher/foreach.md
-Line: 9
-Heading: # FOREACH
-
-File: website/docs/cypher/functions.md
-Line: 9
-Heading: # Functions
-
-File: website/docs/cypher/indexing/index.md
-Line: 9
-Heading: # Indexing
-
-File: website/docs/cypher/known-limitations.md
-Line: 9
-Heading: # Known limitations
-
-File: website/docs/cypher/limit.md
-Line: 9
-Heading: # LIMIT
-
-File: website/docs/cypher/load-csv.md
-Line: 9
-Heading: # LOAD CSV
-
-File: website/docs/cypher/match.md
-Line: 9
-Heading: # MATCH
-
-File: website/docs/cypher/merge.md
-Line: 9
-Heading: # MERGE
-
-File: website/docs/cypher/optional-match.md
-Line: 9
-Heading: # OPTIONAL MATCH
-
-File: website/docs/cypher/order-by.md
-Line: 9
-Heading: # ORDER BY
-
-File: website/docs/cypher/procedures.md
-Line: 9
-Heading: # Procedures
-
-File: website/docs/cypher/remove.md
-Line: 9
-Heading: # REMOVE
-
-File: website/docs/cypher/return.md
-Line: 9
-Heading: # RETURN
-
-File: website/docs/cypher/set.md
-Line: 9
-Heading: # SET
-
-File: website/docs/cypher/skip.md
-Line: 9
-Heading: # SKIP
-
-File: website/docs/cypher/union.md
-Line: 9
-Heading: # UNION
-
-File: website/docs/cypher/unwind.md
-Line: 9
-Heading: # UNWIND
-
-File: website/docs/cypher/where.md
-Line: 9
-Heading: # WHERE
-
-File: website/docs/cypher/with.md
-Line: 9
-Heading: # WITH
-
-File: website/docs/design/client-spec.md
-Line: 8
-Heading: # Client Specification
-
-File: website/docs/design/index.md
-Line: 9
-Heading: # The FalkorDB Design
-
-File: website/docs/design/result-structure.md
-Line: 8
-Heading: # Result Set Structure
-
-File: website/docs/design/third-party.md
-Line: 8
-Heading: # Third-Party Components in FalkorDB
-
-File: website/docs/genai-tools/ag2.md
-Line: 9
-Heading: # AG2
-
-File: website/docs/genai-tools/graphrag-sdk.md
-Line: 9
-Heading: # GraphRAG-SDK
-
-File: website/docs/genai-tools/graphrag-toolkit.md
-Line: 9
-Heading: # GraphRAG Toolkit
-
-File: website/docs/genai-tools/index.md
-Line: 9
-Heading: # GenAI Tools
-
-File: website/docs/genai-tools/langchain.md
-Line: 9
-Heading: # LangChain
-
-File: website/docs/genai-tools/langgraph.md
-Line: 9
-Heading: # LangGraph
-
-File: website/docs/genai-tools/llamaindex.md
-Line: 9
-Heading: # LlamaIndex
-
-File: website/docs/getting-started/configuration.md
-Line: 9
-Heading: # Configuration
-
-File: website/docs/index.mdx
-Line: 19
-Heading: # FalkorDB
-
-File: website/docs/integration/bolt-support.md
-Line: 9
-Heading: # [EXPERIMENTAL] BOLT protocol support for FalkorDB
-
-File: website/docs/integration/index.md
-Line: 9
-Heading: # Integration
-
-File: website/docs/integration/jena.md
-Line: 9
-Heading: # Apache Jena Integration
-
-File: website/docs/integration/kafka-connect.md
-Line: 9
-Heading: # Kafka Connect Sink
-
-File: website/docs/integration/rest.md
-Line: 9
-Heading: # FalkorDB Browser REST API
-
-File: website/docs/integration/spring-data-falkordb.md
-Line: 9
-Heading: # Spring Data FalkorDB
-
-File: website/docs/license.md
-Line: 9
-Heading: # FalkorDB License
-
-File: website/docs/operations/cluster.md
-Line: 9
-Heading: # Setting Up a FalkorDB Cluster
-
-File: website/docs/operations/docker.md
-Line: 9
-Heading: # Running FalkorDB with Docker and Docker Compose
-
-File: website/docs/operations/falkordblite.md
-Line: 9
-Heading: # FalkorDBLite
-
-File: website/docs/operations/index.md
-Line: 9
-Heading: # Operations
-
-File: website/docs/operations/k8s-support.md
-Line: 9
-Heading: # Kubernetes support for FalkorDB
-
-File: website/docs/operations/kubeblocks.md
-Line: 9
-Heading: # Deploy FalkorDB with KubeBlocks
-
-File: website/docs/operations/lightning-ai.md
-Line: 9
-Heading: # Deploy FalkorDB on Lightning.AI
-
-File: website/docs/operations/migration/index.md
-Line: 9
-Heading: # Migration
-
-File: website/docs/operations/migration/kuzu-to-falkordb.md
-Line: 8
-Heading: # Kuzu to FalkorDB Migration
-
-File: website/docs/operations/migration/neo4j-to-falkordb.md
-Line: 8
-Heading: # Neo4j to FalkorDB Migration
-
-File: website/docs/operations/migration/rdf-to-falkordb.md
-Line: 8
-Heading: # RDF to FalkorDB Migration
-
-File: website/docs/operations/migration/redisgraph-to-falkordb.md
-Line: 8
-Heading: # RedisGraph to FalkorDB Migration
-
-File: website/docs/operations/persistence.md
-Line: 9
-Heading: # Configuring FalkorDB Docker for Persistence
-
-File: website/docs/operations/replication.md
-Line: 9
-Heading: # Configuring FalkorDB Docker for Replication
-
-File: website/docs/udfs/flex/bitwise/and.md
-Line: 7
-Heading: # bitwise.and
-
-File: website/docs/udfs/flex/bitwise/index.md
-Line: 8
-Heading: # Bitwise Functions
-
-File: website/docs/udfs/flex/bitwise/not.md
-Line: 7
-Heading: # bitwise.not
-
-File: website/docs/udfs/flex/bitwise/or.md
-Line: 7
-Heading: # bitwise.or
-
-File: website/docs/udfs/flex/bitwise/shiftLeft.md
-Line: 7
-Heading: # bitwise.shiftLeft
-
-File: website/docs/udfs/flex/bitwise/shiftRight.md
-Line: 7
-Heading: # bitwise.shiftRight
-
-File: website/docs/udfs/flex/bitwise/xor.md
-Line: 7
-Heading: # bitwise.xor
-
-File: website/docs/udfs/flex/collections/frequencies.md
-Line: 7
-Heading: # coll.frequencies
-
-File: website/docs/udfs/flex/collections/index.md
-Line: 8
-Heading: # Collection Functions
-
-File: website/docs/udfs/flex/collections/intersection.md
-Line: 7
-Heading: # coll.intersection
-
-File: website/docs/udfs/flex/collections/shuffle.md
-Line: 7
-Heading: # coll.shuffle
-
-File: website/docs/udfs/flex/collections/union.md
-Line: 7
-Heading: # coll.union
-
-File: website/docs/udfs/flex/collections/zip.md
-Line: 7
-Heading: # coll.zip
-
-File: website/docs/udfs/flex/date/format.md
-Line: 7
-Heading: # date.format
-
-File: website/docs/udfs/flex/date/index.md
-Line: 8
-Heading: # Date Functions
-
-File: website/docs/udfs/flex/date/parse.md
-Line: 7
-Heading: # date.parse
-
-File: website/docs/udfs/flex/date/toTimeZone.md
-Line: 7
-Heading: # date.toTimeZone
-
-File: website/docs/udfs/flex/date/truncate.md
-Line: 7
-Heading: # date.truncate
-
-File: website/docs/udfs/flex/index.md
-Line: 7
-Heading: # FLEX Function Reference
-
-File: website/docs/udfs/flex/json/fromJsonList.md
-Line: 7
-Heading: # json.fromJsonList
-
-File: website/docs/udfs/flex/json/fromJsonMap.md
-Line: 7
-Heading: # json.fromJsonMap
-
-File: website/docs/udfs/flex/json/index.md
-Line: 8
-Heading: # JSON Functions
-
-File: website/docs/udfs/flex/json/toJson.md
-Line: 7
-Heading: # json.toJson
-
-File: website/docs/udfs/flex/map/fromPairs.md
-Line: 7
-Heading: # map.fromPairs
-
-File: website/docs/udfs/flex/map/index.md
-Line: 8
-Heading: # Map Functions
-
-File: website/docs/udfs/flex/map/merge.md
-Line: 7
-Heading: # map.merge
-
-File: website/docs/udfs/flex/map/removeKey.md
-Line: 7
-Heading: # map.removeKey
-
-File: website/docs/udfs/flex/map/removeKeys.md
-Line: 7
-Heading: # map.removeKeys
-
-File: website/docs/udfs/flex/map/submap.md
-Line: 7
-Heading: # map.submap
-
-File: website/docs/udfs/flex/similarity/index.md
-Line: 8
-Heading: # Similarity Functions
-
-File: website/docs/udfs/flex/similarity/jaccard.md
-Line: 7
-Heading: # sim.jaccard
-
-File: website/docs/udfs/flex/text/camelCase.md
-Line: 7
-Heading: # text.camelCase
-
-File: website/docs/udfs/flex/text/capitalize.md
-Line: 7
-Heading: # text.capitalize
-
-File: website/docs/udfs/flex/text/decapitalize.md
-Line: 7
-Heading: # text.decapitalize
-
-File: website/docs/udfs/flex/text/format.md
-Line: 7
-Heading: # text.format
-
-File: website/docs/udfs/flex/text/index.md
-Line: 8
-Heading: # Text Functions
-
-File: website/docs/udfs/flex/text/indexOf.md
-Line: 7
-Heading: # text.indexOf
-
-File: website/docs/udfs/flex/text/indexesOf.md
-Line: 7
-Heading: # text.indexesOf
-
-File: website/docs/udfs/flex/text/jaroWinkler.md
-Line: 7
-Heading: # text.jaroWinkler
-
-File: website/docs/udfs/flex/text/join.md
-Line: 7
-Heading: # text.join
-
-File: website/docs/udfs/flex/text/levenshtein.md
-Line: 7
-Heading: # text.levenshtein
-
-File: website/docs/udfs/flex/text/lpad.md
-Line: 7
-Heading: # text.lpad
-
-File: website/docs/udfs/flex/text/regexGroups.md
-Line: 7
-Heading: # text.regexGroups
-
-File: website/docs/udfs/flex/text/repeat.md
-Line: 7
-Heading: # text.repeat
-
-File: website/docs/udfs/flex/text/replace.md
-Line: 7
-Heading: # text.replace
-
-File: website/docs/udfs/flex/text/rpad.md
-Line: 7
-Heading: # text.rpad
-
-File: website/docs/udfs/flex/text/snakeCase.md
-Line: 7
-Heading: # text.snakeCase
-
-File: website/docs/udfs/flex/text/swapCase.md
-Line: 7
-Heading: # text.swapCase
-
-File: website/docs/udfs/flex/text/upperCamelCase.md
-Line: 7
-Heading: # text.upperCamelCase
-
-File: website/docs/udfs/index.md
-Line: 8
-Heading: # UDFs
-
-```
diff --git a/find_duplicate_h1s.py b/find_duplicate_h1s.py
deleted file mode 100644
index 468e02e..0000000
--- a/find_duplicate_h1s.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import os
-import re
-from pathlib import Path
-
-def extract_frontmatter_title(content):
- """Extract title from frontmatter"""
- frontmatter_match = re.match(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
- if frontmatter_match:
- frontmatter = frontmatter_match.group(1)
- title_match = re.search(r'^title:\s*(.+)$', frontmatter, re.MULTILINE)
- if title_match:
- return title_match.group(1).strip()
- return None
-
-def find_h1_headings(content):
- """Find all H1 headings (# ) and their line numbers"""
- lines = content.split('\n')
- h1_headings = []
- for i, line in enumerate(lines, 1):
- # Match H1 headings but not comments in code blocks
- if re.match(r'^# [^#]', line):
- h1_headings.append((i, line.strip()))
- return h1_headings
-
-def normalize_text(text):
- """Normalize text for comparison"""
- return re.sub(r'[^a-z0-9]', '', text.lower())
-
-docs_dir = Path('website/docs')
-results = []
-
-# Process both .md and .mdx files
-for pattern in ['*.md', '*.mdx']:
- for md_file in docs_dir.rglob(pattern):
- try:
- with open(md_file, 'r', encoding='utf-8') as f:
- content = f.read()
-
- frontmatter_title = extract_frontmatter_title(content)
- h1_headings = find_h1_headings(content)
-
- if frontmatter_title and h1_headings:
- normalized_title = normalize_text(frontmatter_title)
- for line_num, h1_text in h1_headings:
- h1_content = h1_text[2:].strip() # Remove "# "
- normalized_h1 = normalize_text(h1_content)
-
- # Check for exact match or very similar match
- if normalized_h1 == normalized_title or normalized_title in normalized_h1 or normalized_h1 in normalized_title:
- relative_path = md_file.relative_to(Path('website/docs'))
- results.append({
- 'file': str(relative_path),
- 'line': line_num,
- 'h1': h1_text,
- 'frontmatter_title': frontmatter_title
- })
- break
- except Exception as e:
- print(f"Error processing {md_file}: {e}")
-
-# Sort results by file path
-results.sort(key=lambda x: x['file'])
-
-print(f"Found {len(results)} files with duplicate H1 headings:\n")
-print("="*80)
-for result in results:
- print(f"\nFile: {result['file']}")
- print(f"Line: {result['line']}")
- print(f"Heading: {result['h1']}")
- print(f"Frontmatter Title: {result['frontmatter_title']}")
- print("-"*80)
diff --git a/fix_all_h1s.py b/fix_all_h1s.py
deleted file mode 100644
index bc255b4..0000000
--- a/fix_all_h1s.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-import re
-from pathlib import Path
-
-def extract_frontmatter_title(content):
- """Extract title from frontmatter"""
- frontmatter_match = re.match(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
- if frontmatter_match:
- frontmatter = frontmatter_match.group(1)
- title_match = re.search(r'^title:\s*(.+)$', frontmatter, re.MULTILINE)
- if title_match:
- return title_match.group(1).strip()
- return None
-
-def find_first_h1_heading(content):
- """Find the first H1 heading and its position"""
- lines = content.split('\n')
- for i, line in enumerate(lines):
- # Match H1 headings but not comments in code blocks
- if re.match(r'^# [^#]', line):
- return i, line
- return None, None
-
-def normalize_text(text):
- """Normalize text for comparison"""
- return re.sub(r'[^a-z0-9]', '', text.lower())
-
-def remove_duplicate_h1(file_path):
- """Remove duplicate H1 heading from a file"""
- try:
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- frontmatter_title = extract_frontmatter_title(content)
- h1_line_num, h1_text = find_first_h1_heading(content)
-
- if not frontmatter_title or h1_line_num is None:
- return False
-
- h1_content = h1_text[2:].strip() # Remove "# "
- normalized_title = normalize_text(frontmatter_title)
- normalized_h1 = normalize_text(h1_content)
-
- # Check if they match
- if normalized_h1 == normalized_title or normalized_title in normalized_h1 or normalized_h1 in normalized_title:
- lines = content.split('\n')
-
- # Remove the H1 line
- del lines[h1_line_num]
-
- # If the next line is empty, remove it too to avoid extra spacing
- if h1_line_num < len(lines) and lines[h1_line_num].strip() == '':
- del lines[h1_line_num]
-
- # Write back
- new_content = '\n'.join(lines)
- with open(file_path, 'w', encoding='utf-8') as f:
- f.write(new_content)
-
- return True
- except Exception as e:
- print(f"Error processing {file_path}: {e}")
- return False
-
-docs_dir = Path('website/docs')
-fixed_count = 0
-
-# Process both .md and .mdx files
-for pattern in ['*.md', '*.mdx']:
- for md_file in docs_dir.rglob(pattern):
- if remove_duplicate_h1(md_file):
- fixed_count += 1
- print(f"Fixed: {md_file.relative_to(docs_dir)}")
-
-print(f"\n{'='*80}")
-print(f"Fixed {fixed_count} files!")
diff --git a/fix_angle_brackets.py b/fix_angle_brackets.py
deleted file mode 100644
index 457e71a..0000000
--- a/fix_angle_brackets.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python3
-"""
-Fix < > characters in markdown that should be escaped
-"""
-import re
-from pathlib import Path
-
-DOCS_DIR = Path("/Users/danshalev/docs-staging/website/docs")
-
-def fix_angle_brackets(file_path):
- """Fix unescaped angle brackets in markdown"""
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- original = content
-
- # Pattern to find < > that are not:
-    # - Part of HTML/JSX tags (, , , etc.)
- # - In code blocks
- # - Already escaped
-
- # Split by code blocks to avoid modifying code
- parts = re.split(r'(```[\s\S]*?```|`[^`]+`)', content)
-
- fixed_parts = []
- for i, part in enumerate(parts):
- # Skip code blocks (odd indices)
- if i % 2 == 1:
- fixed_parts.append(part)
- continue
-
- # Fix angle brackets that look like placeholders (not real HTML tags)
- # Match patterns like: , ,