diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..fb06aba --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,38 @@ +name: "CodeQL" + +on: + push: + branches: ["main"] + pull_request: + branches: ["main"] + schedule: + - cron: "0 3 * * 1" + +jobs: + analyze: + name: Analyze (javascript) + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ["javascript-typescript"] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.gitignore b/.gitignore index 99716b5..fbc4460 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,50 @@ +# Docusaurus build artifacts +website/build/ +website/.docusaurus/ +website/docs/ +website/node_modules/ + +# Dependencies +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +package-lock.json +yarn.lock + +# Environment variables +.env +.env.local +.env.development.local +.env.test.local +.env.production.local + +# OS generated files +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# Editor files +*.swp +*.swo +*~ +.vscode/ +.idea/ + +# Spell check artifacts *.dic -# Ignore root-level Python scripts -/*.py +# Build outputs +/build +/dist + +# Local scripts +*.py + +# Generated outputs +output.txt +final_results.txt diff --git a/README.md b/README.md index ba83483..183ecf1 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,12 @@ +# FalkorDB Docs + [![Workflow](https://github.com/FalkorDB/docs/actions/workflows/pages/pages-build-deployment/badge.svg?branch=main)](https://github.com/FalkorDB/docs/actions/workflows/pages/pages-build-deployment) -[![Discord](https://img.shields.io/discord/1146782921294884966?style=flat-square)](https://discord.gg/ErBEqN9E) +[![Discord](https://img.shields.io/discord/1146782921294884966?style=flat-square)](https://discord.gg/ErBEqN9E) [![Try Free](https://img.shields.io/badge/Try%20Free-FalkorDB%20Cloud-FF8101?labelColor=FDE900&style=flat-square)](https://app.falkordb.cloud) [![Trendshift](https://trendshift.io/api/badge/repositories/14787)](https://trendshift.io/repositories/14787) -# https://docs.falkordb.com +Docs site: [https://docs.falkordb.com](https://docs.falkordb.com) ## Build diff --git a/final_results.txt b/final_results.txt deleted file mode 100644 index 91719a2..0000000 --- a/final_results.txt +++ /dev/null @@ -1,504 +0,0 @@ -Found 125 files with duplicate H1 headings (frontmatter title + markdown H1): - -``` -File: website/docs/References.md -Line: 9 -Heading: # References - -File: website/docs/agentic-memory/cognee.md -Line: 9 -Heading: # Cognee - -File: website/docs/agentic-memory/graphiti.md -Line: 9 -Heading: # Graphiti - -File: website/docs/agentic-memory/index.md -Line: 9 -Heading: # Agentic Memory - -File: website/docs/algorithms/betweenness-centrality.md -Line: 8 -Heading: # Betweenness Centrality - -File: website/docs/algorithms/bfs.md -Line: 8 -Heading: # BFS - -File: website/docs/algorithms/cdlp.md -Line: 8 -Heading: # Community Detection using Label Propagation (CDLP) - -File: website/docs/algorithms/index.md -Line: 9 -Heading: # FalkorDB Algorithms Overview - -File: website/docs/algorithms/msf.md -Line: 9 -Heading: # Minimum Spanning 
Forest (MSF) - -File: website/docs/algorithms/pagerank.md -Line: 8 -Heading: # PageRank - -File: website/docs/algorithms/sppath.md -Line: 8 -Heading: # `algo.SPpaths` - Shortest Path (Single Pair) - -File: website/docs/algorithms/sspath.md -Line: 8 -Heading: # `algo.SSpaths` - Single Source Paths - -File: website/docs/algorithms/wcc.md -Line: 8 -Heading: # Weakly Connected Components (WCC) - -File: website/docs/cloud/features.md -Line: 9 -Heading: # Features - -File: website/docs/cloud/index.md -Line: 11 -Heading: # FalkorDB Cloud DBaaS - -File: website/docs/commands/acl.md -Line: 9 -Heading: # ACL - -File: website/docs/commands/graph.info.md -Line: 8 -Heading: # GRAPH.INFO - -File: website/docs/commands/index.md -Line: 9 -Heading: # Commands - -File: website/docs/cypher/call.md -Line: 9 -Heading: # CALL \{\} - -File: website/docs/cypher/create.md -Line: 9 -Heading: # CREATE - -File: website/docs/cypher/cypher-support.md -Line: 9 -Heading: # Cypher coverage - -File: website/docs/cypher/delete.md -Line: 9 -Heading: # DELETE - -File: website/docs/cypher/foreach.md -Line: 9 -Heading: # FOREACH - -File: website/docs/cypher/functions.md -Line: 9 -Heading: # Functions - -File: website/docs/cypher/indexing/index.md -Line: 9 -Heading: # Indexing - -File: website/docs/cypher/known-limitations.md -Line: 9 -Heading: # Known limitations - -File: website/docs/cypher/limit.md -Line: 9 -Heading: # LIMIT - -File: website/docs/cypher/load-csv.md -Line: 9 -Heading: # LOAD CSV - -File: website/docs/cypher/match.md -Line: 9 -Heading: # MATCH - -File: website/docs/cypher/merge.md -Line: 9 -Heading: # MERGE - -File: website/docs/cypher/optional-match.md -Line: 9 -Heading: # OPTIONAL MATCH - -File: website/docs/cypher/order-by.md -Line: 9 -Heading: # ORDER BY - -File: website/docs/cypher/procedures.md -Line: 9 -Heading: # Procedures - -File: website/docs/cypher/remove.md -Line: 9 -Heading: # REMOVE - -File: website/docs/cypher/return.md -Line: 9 -Heading: # RETURN - -File: website/docs/cypher/set.md -Line: 9 -Heading: # SET - -File: website/docs/cypher/skip.md -Line: 9 -Heading: # SKIP - -File: website/docs/cypher/union.md -Line: 9 -Heading: # UNION - -File: website/docs/cypher/unwind.md -Line: 9 -Heading: # UNWIND - -File: website/docs/cypher/where.md -Line: 9 -Heading: # WHERE - -File: website/docs/cypher/with.md -Line: 9 -Heading: # WITH - -File: website/docs/design/client-spec.md -Line: 8 -Heading: # Client Specification - -File: website/docs/design/index.md -Line: 9 -Heading: # The FalkorDB Design - -File: website/docs/design/result-structure.md -Line: 8 -Heading: # Result Set Structure - -File: website/docs/design/third-party.md -Line: 8 -Heading: # Third-Party Components in FalkorDB - -File: website/docs/genai-tools/ag2.md -Line: 9 -Heading: # AG2 - -File: website/docs/genai-tools/graphrag-sdk.md -Line: 9 -Heading: # GraphRAG-SDK - -File: website/docs/genai-tools/graphrag-toolkit.md -Line: 9 -Heading: # GraphRAG Toolkit - -File: website/docs/genai-tools/index.md -Line: 9 -Heading: # GenAI Tools - -File: website/docs/genai-tools/langchain.md -Line: 9 -Heading: # LangChain - -File: website/docs/genai-tools/langgraph.md -Line: 9 -Heading: # LangGraph - -File: website/docs/genai-tools/llamaindex.md -Line: 9 -Heading: # LlamaIndex - -File: website/docs/getting-started/configuration.md -Line: 9 -Heading: # Configuration - -File: website/docs/index.mdx -Line: 19 -Heading: # FalkorDB - -File: website/docs/integration/bolt-support.md -Line: 9 -Heading: # [EXPERIMENTAL] BOLT protocol support for FalkorDB - -File: 
website/docs/integration/index.md -Line: 9 -Heading: # Integration - -File: website/docs/integration/jena.md -Line: 9 -Heading: # Apache Jena Integration - -File: website/docs/integration/kafka-connect.md -Line: 9 -Heading: # Kafka Connect Sink - -File: website/docs/integration/rest.md -Line: 9 -Heading: # FalkorDB Browser REST API - -File: website/docs/integration/spring-data-falkordb.md -Line: 9 -Heading: # Spring Data FalkorDB - -File: website/docs/license.md -Line: 9 -Heading: # FalkorDB License - -File: website/docs/operations/cluster.md -Line: 9 -Heading: # Setting Up a FalkorDB Cluster - -File: website/docs/operations/docker.md -Line: 9 -Heading: # Running FalkorDB with Docker and Docker Compose - -File: website/docs/operations/falkordblite.md -Line: 9 -Heading: # FalkorDBLite - -File: website/docs/operations/index.md -Line: 9 -Heading: # Operations - -File: website/docs/operations/k8s-support.md -Line: 9 -Heading: # Kubernetes support for FalkorDB - -File: website/docs/operations/kubeblocks.md -Line: 9 -Heading: # Deploy FalkorDB with KubeBlocks - -File: website/docs/operations/lightning-ai.md -Line: 9 -Heading: # Deploy FalkorDB on Lightning.AI - -File: website/docs/operations/migration/index.md -Line: 9 -Heading: # Migration - -File: website/docs/operations/migration/kuzu-to-falkordb.md -Line: 8 -Heading: # Kuzu to FalkorDB Migration - -File: website/docs/operations/migration/neo4j-to-falkordb.md -Line: 8 -Heading: # Neo4j to FalkorDB Migration - -File: website/docs/operations/migration/rdf-to-falkordb.md -Line: 8 -Heading: # RDF to FalkorDB Migration - -File: website/docs/operations/migration/redisgraph-to-falkordb.md -Line: 8 -Heading: # RedisGraph to FalkorDB Migration - -File: website/docs/operations/persistence.md -Line: 9 -Heading: # Configuring FalkorDB Docker for Persistence - -File: website/docs/operations/replication.md -Line: 9 -Heading: # Configuring FalkorDB Docker for Replication - -File: website/docs/udfs/flex/bitwise/and.md -Line: 7 -Heading: # bitwise.and - -File: website/docs/udfs/flex/bitwise/index.md -Line: 8 -Heading: # Bitwise Functions - -File: website/docs/udfs/flex/bitwise/not.md -Line: 7 -Heading: # bitwise.not - -File: website/docs/udfs/flex/bitwise/or.md -Line: 7 -Heading: # bitwise.or - -File: website/docs/udfs/flex/bitwise/shiftLeft.md -Line: 7 -Heading: # bitwise.shiftLeft - -File: website/docs/udfs/flex/bitwise/shiftRight.md -Line: 7 -Heading: # bitwise.shiftRight - -File: website/docs/udfs/flex/bitwise/xor.md -Line: 7 -Heading: # bitwise.xor - -File: website/docs/udfs/flex/collections/frequencies.md -Line: 7 -Heading: # coll.frequencies - -File: website/docs/udfs/flex/collections/index.md -Line: 8 -Heading: # Collection Functions - -File: website/docs/udfs/flex/collections/intersection.md -Line: 7 -Heading: # coll.intersection - -File: website/docs/udfs/flex/collections/shuffle.md -Line: 7 -Heading: # coll.shuffle - -File: website/docs/udfs/flex/collections/union.md -Line: 7 -Heading: # coll.union - -File: website/docs/udfs/flex/collections/zip.md -Line: 7 -Heading: # coll.zip - -File: website/docs/udfs/flex/date/format.md -Line: 7 -Heading: # date.format - -File: website/docs/udfs/flex/date/index.md -Line: 8 -Heading: # Date Functions - -File: website/docs/udfs/flex/date/parse.md -Line: 7 -Heading: # date.parse - -File: website/docs/udfs/flex/date/toTimeZone.md -Line: 7 -Heading: # date.toTimeZone - -File: website/docs/udfs/flex/date/truncate.md -Line: 7 -Heading: # date.truncate - -File: website/docs/udfs/flex/index.md -Line: 7 -Heading: # FLEX 
Function Reference - -File: website/docs/udfs/flex/json/fromJsonList.md -Line: 7 -Heading: # json.fromJsonList - -File: website/docs/udfs/flex/json/fromJsonMap.md -Line: 7 -Heading: # json.fromJsonMap - -File: website/docs/udfs/flex/json/index.md -Line: 8 -Heading: # JSON Functions - -File: website/docs/udfs/flex/json/toJson.md -Line: 7 -Heading: # json.toJson - -File: website/docs/udfs/flex/map/fromPairs.md -Line: 7 -Heading: # map.fromPairs - -File: website/docs/udfs/flex/map/index.md -Line: 8 -Heading: # Map Functions - -File: website/docs/udfs/flex/map/merge.md -Line: 7 -Heading: # map.merge - -File: website/docs/udfs/flex/map/removeKey.md -Line: 7 -Heading: # map.removeKey - -File: website/docs/udfs/flex/map/removeKeys.md -Line: 7 -Heading: # map.removeKeys - -File: website/docs/udfs/flex/map/submap.md -Line: 7 -Heading: # map.submap - -File: website/docs/udfs/flex/similarity/index.md -Line: 8 -Heading: # Similarity Functions - -File: website/docs/udfs/flex/similarity/jaccard.md -Line: 7 -Heading: # sim.jaccard - -File: website/docs/udfs/flex/text/camelCase.md -Line: 7 -Heading: # text.camelCase - -File: website/docs/udfs/flex/text/capitalize.md -Line: 7 -Heading: # text.capitalize - -File: website/docs/udfs/flex/text/decapitalize.md -Line: 7 -Heading: # text.decapitalize - -File: website/docs/udfs/flex/text/format.md -Line: 7 -Heading: # text.format - -File: website/docs/udfs/flex/text/index.md -Line: 8 -Heading: # Text Functions - -File: website/docs/udfs/flex/text/indexOf.md -Line: 7 -Heading: # text.indexOf - -File: website/docs/udfs/flex/text/indexesOf.md -Line: 7 -Heading: # text.indexesOf - -File: website/docs/udfs/flex/text/jaroWinkler.md -Line: 7 -Heading: # text.jaroWinkler - -File: website/docs/udfs/flex/text/join.md -Line: 7 -Heading: # text.join - -File: website/docs/udfs/flex/text/levenshtein.md -Line: 7 -Heading: # text.levenshtein - -File: website/docs/udfs/flex/text/lpad.md -Line: 7 -Heading: # text.lpad - -File: website/docs/udfs/flex/text/regexGroups.md -Line: 7 -Heading: # text.regexGroups - -File: website/docs/udfs/flex/text/repeat.md -Line: 7 -Heading: # text.repeat - -File: website/docs/udfs/flex/text/replace.md -Line: 7 -Heading: # text.replace - -File: website/docs/udfs/flex/text/rpad.md -Line: 7 -Heading: # text.rpad - -File: website/docs/udfs/flex/text/snakeCase.md -Line: 7 -Heading: # text.snakeCase - -File: website/docs/udfs/flex/text/swapCase.md -Line: 7 -Heading: # text.swapCase - -File: website/docs/udfs/flex/text/upperCamelCase.md -Line: 7 -Heading: # text.upperCamelCase - -File: website/docs/udfs/index.md -Line: 8 -Heading: # UDFs - -``` diff --git a/getting-started/index.md b/getting-started/index.md index c3eae15..a605806 100644 --- a/getting-started/index.md +++ b/getting-started/index.md @@ -3,10 +3,11 @@ title: "Getting Started" description: > Getting Started with FalkorDB Graph Database. --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Getting Started with FalkorDB +## Getting Started with FalkorDB This guide will walk you through setting up FalkorDB, modeling a social network as a graph, and accessing it using one of the [FalkorDB client libraries](/getting-started/clients) with the [Cypher](/cypher) query language. @@ -14,11 +15,11 @@ This guide will walk you through setting up FalkorDB, modeling a social network ## Prerequisites -1. **FalkorDB Instance**: Set up FalkorDB (on-prem or cloud). +1. **FalkorDB Instance**: Set up FalkorDB (on-prem or cloud). 
- [Run FalkorDB Docker](https://hub.docker.com/r/falkordb/falkordb/) - [Create a FalkorDB Cloud Instance](https://app.falkordb.cloud/signup) 2. **Install FalkorDB Client**: - + @@ -62,6 +63,7 @@ cargo add falkordb ## Step 1: Model a Social Network as a Graph Let's create a simple graph for a social network where: + - **Nodes** represent `User` and `Post`. - **Relationships** connect `User`s with a `FRIENDS_WITH` relationship, and `User`s are connected via a `CREATED` relationship to `Post`s @@ -69,17 +71,17 @@ Let's create a simple graph for a social network where: **Node Types:** -| Node Type | Properties | Description | -|-----------|--------------------------|---------------------------------------| -| User | `id`, `name`, `email` | Represents a user in the social network | -| Post | `id`, `content`, `date` | Represents a post created by a user | +| Node Type | Properties | Description | +| --- | --- | --- | +| User | `id`, `name`, `email` | Represents a user in the social network | +| Post | `id`, `content`, `date` | Represents a post created by a user | **Relationship Types:** -| Relationship Type | Start Node | End Node | Properties | Description | -|-------------------|------------|----------|--------------|------------------------------------------| -| FRIENDS_WITH | User | User | `since` | Indicates friendship between two users | -| CREATED | User | Post | `time` | Connects a user to their created posts | +| Relationship Type | Start Node | End Node | Properties | Description | +| --- | --- | --- | --- | --- | +| FRIENDS_WITH | User | User | `since` | Indicates friendship between two users | +| CREATED | User | Post | `time` | Connects a user to their created posts | ![FalkorDB-Model a Social Network as a Graph](https://github.com/user-attachments/assets/57d9b837-661e-4500-a9f2-88e754382d29) @@ -416,9 +418,11 @@ for record in result.data.by_ref() { Congratulations! πŸŽ‰ You have successfully modeled, loaded, and queried a social network graph with FalkorDB. Next, dive deeper into FalkorDB's powerful features: + - [Advanced Cypher](/cypher) - [Database Operations](/operations) - [GenAI Tools](/genai-tools) - [Agentic Memory](/agentic-memory) + For questions or support, visit our [community forums](https://www.falkordb.com/contact-us/) diff --git a/index-staging.md b/index-staging.md deleted file mode 100644 index 520bed8..0000000 --- a/index-staging.md +++ /dev/null @@ -1,326 +0,0 @@ ---- -title: Home (Staging) -description: "Build intelligent applications with the fastest graph database for knowledge graphs and GraphRAG" ---- - -[![Docker Hub](https://img.shields.io/docker/pulls/falkordb/falkordb?label=Docker&style=flat-square)](https://hub.docker.com/r/falkordb/falkordb/) -[![Discord](https://img.shields.io/discord/1146782921294884966?style=flat-square)](https://discord.gg/ErBEqN9E) -[![Try Free](https://img.shields.io/badge/Try%20Free-FalkorDB%20Cloud-FF8101?labelColor=FDE900&style=flat-square)](https://app.falkordb.cloud) -[![Trendshift](https://trendshift.io/api/badge/repositories/14787)](https://trendshift.io/repositories/14787) - -# FalkorDB - -**A blazing-fast graph database that makes it simple to build knowledge graphs, recommendation engines, fraud detection systems, and intelligent GenAI applications.** - -FalkorDB combines the power of a low-latency property graph database with native GraphRAG capabilitiesβ€”enabling developers to query complex, interconnected data using OpenCypher while seamlessly integrating with LLMs for next-generation AI applications. 
- ---- - -## Why Choose FalkorDB? - - - - - - - - - - -
- -### ⚑ **Built for Speed** -Sub-millisecond query latency using sparse adjacency matrix storage. Handle millions of relationships without compromising performance. - - - -### 🎯 **Production-Ready** -Multi-tenant architecture, ACID transactions, and support for both RESP and Bolt protocols. Deploy on Docker, Kubernetes, or FalkorDB Cloud. - -
- -### 🧠 **AI-Native** -Native vector similarity search and GraphRAG SDK for building accurate RAG applications that understand relationships, not just keywords. - - - -### πŸ› οΈ **Developer-First** -OpenCypher query language, clients in 6+ languages, integrations with LangChain, LlamaIndex, and popular data tools. - -
- ---- - -## What Can You Build? - - - - - - - - - - - - -
- -### **πŸ” Fraud Detection** -Model complex transaction patterns and identify suspicious relationships in real-time across financial networks. - - - -### **πŸ’‘ Recommendation Engines** -Build collaborative filtering systems that understand user behavior, product relationships, and contextual connections. - - - -### **🧩 Knowledge Graphs** -Create intelligent knowledge bases that power semantic search, question answering, and enterprise data discovery. - -
- -### **πŸ€– GraphRAG for GenAI** -Enhance LLM responses with structured knowledge graphs, reducing hallucinations and improving accuracy for domain-specific queries. - - - -### **πŸ“Š Real-Time Analytics** -Analyze social networks, supply chains, and interconnected data with graph algorithms like PageRank and community detection. - - - -### **πŸ”— Master Data Management** -Unify data across systems by modeling complex entity relationships and hierarchies in a flexible graph structure. - -
- ---- - -## Quick Start - -Get FalkorDB running in 30 seconds: - -```bash -docker run -p 6379:6379 -p 3000:3000 -it --rm falkordb/falkordb:latest -``` - -**Alternative Options:** -- [Try FalkorDB Cloud](https://app.falkordb.cloud) (Free tier available) -- [Deploy to Kubernetes](/operations/k8s-support) -- [Railway Template](/operations/railway) - ---- - -## Choose Your Path - - - - - - - - - - -
- -### πŸ“š **Learn Graph Database Concepts** - -New to graph databases? Start here. - -- [What is a Graph Database?](/getting-started) -- [Property Graph Model Overview](https://github.com/opencypher/openCypher/blob/master/docs/property-graph-model.adoc) -- [OpenCypher Query Language](/cypher) -- [When to Use Graphs vs. Relational](/getting-started#when-to-use-graphs) - - - -### πŸš€ **Quick Start Tutorial** - -Ready to code? Jump right in. - -- [5-Minute Tutorial](/getting-started) -- [Client Libraries (Python, JS, Java, Rust)](/getting-started/clients) -- [Connect & Query Guide](/getting-started#first-query) -- [Sample Applications](https://github.com/FalkorDB/demos) - -
- -### πŸ€– **Explore GraphRAG & GenAI** - -Build AI-powered applications. - -- [GraphRAG SDK Overview](/genai-tools/graphrag-sdk) -- [LangChain Integration](/genai-tools/langchain) -- [LlamaIndex Integration](/genai-tools/llamaindex) -- [AG2 & AutoGen Support](/genai-tools/ag2) - - - -### βš™οΈ **Deploy to Production** - -Scale your application confidently. - -- [Configuration Guide](/getting-started/configuration) -- [Docker Deployment](/operations/docker) -- [Kubernetes & Helm](/operations/k8s-support) -- [Replication & High Availability](/operations/replication) - -
- ---- - -## Core Features - - - - - - - - - - - - -
- -#### **🎯 Flexible Data Model** -[Property Graph Model](https://github.com/opencypher/openCypher/blob/master/docs/property-graph-model.adoc) with nodes, relationships, and properties. Model complex domains naturally. - - - -#### **πŸ”Ž Advanced Indexing** -[Full-text search](/cypher/indexing/fulltext-index), [vector similarity](/cypher/indexing/vector-index), and [range indexes](/cypher/indexing/range-index) for lightning-fast queries. - - - -#### **πŸ“ OpenCypher Support** -Industry-standard [OpenCypher](/cypher) query language with FalkorDB enhancements for complex graph operations. - -
- -#### **⚑ High Performance** -Sparse adjacency matrix representation enables efficient graph traversal at scale with minimal memory overhead. - - - -#### **πŸ”Œ Protocol Flexibility** -Native support for both [RESP](https://redis.io/docs/reference/protocol-spec/) (Redis) and [Bolt](https://en.wikipedia.org/wiki/Bolt_(network_protocol)) (Neo4j) protocols. - - - -#### **πŸ“Š Graph Algorithms** -Built-in algorithms: [PageRank](/algorithms/pagerank), [BFS](/algorithms/bfs), [Shortest Path](/algorithms/sppath), [Community Detection](/algorithms/cdlp), and more. - -
- ---- - -## Example: Build a Social Network Graph - -Here's how simple it is to create and query a graph with FalkorDB: - -```python -from falkordb import FalkorDB - -# Connect to FalkorDB -db = FalkorDB(host='localhost', port=6379) -g = db.select_graph('SocialNetwork') - -# Create a small social network -g.query(""" - CREATE - (alice:Person {name: 'Alice', age: 30}), - (bob:Person {name: 'Bob', age: 25}), - (charlie:Person {name: 'Charlie', age: 35}), - (alice)-[:FOLLOWS]->(bob), - (bob)-[:FOLLOWS]->(charlie), - (charlie)-[:FOLLOWS]->(alice) -""") - -# Find who Alice follows -result = g.query(""" - MATCH (alice:Person {name: 'Alice'})-[:FOLLOWS]->(friend) - RETURN friend.name -""") - -print(result.result_set[0][0]) # Bob -``` - -**Want to see more examples?** -- [Complete tutorial with multiple languages](/getting-started) -- [GraphRAG examples with LangChain](/genai-tools/langchain) -- [Sample applications repository](https://github.com/FalkorDB/demos) - ---- - -## Integrations & Ecosystem - -FalkorDB works seamlessly with your favorite tools: - -**Language Clients:** Python β€’ JavaScript β€’ Java β€’ Rust β€’ Go β€’ C# β€’ [View All](/getting-started/clients) - -**GenAI Frameworks:** [LangChain](/genai-tools/langchain) β€’ [LangGraph](/genai-tools/langgraph) β€’ [LlamaIndex](/genai-tools/llamaindex) β€’ [AG2](/genai-tools/ag2) - -**Data Tools:** [Kafka Connect](/integration/kafka-connect) β€’ [Spring Data](/integration/spring-data-falkordb) β€’ [Apache Jena](/integration/jena) - -**Deployment:** [Docker](/operations/docker) β€’ [Kubernetes](/operations/k8s-support) β€’ [Railway](/operations/railway) β€’ [Lightning.AI](/operations/lightning-ai) - ---- - -## Performance at Scale - -- **Sub-10ms queries** for most graph traversals -- **Multi-tenant architecture** for SaaS applications -- **Horizontal scaling** via replication -- **ACID transactions** for data consistency -- **Sparse matrix storage** reduces memory by 90%+ vs dense graphs - -[See detailed benchmarks β†’](https://github.com/FalkorDB/FalkorDB#benchmarks) - ---- - -## Community & Support - - - - - - - -
- -### πŸ’¬ Discord -Join our [Discord community](https://discord.gg/ErBEqN9E) for real-time help and discussions - - - -### πŸ“‹ GitHub Discussions -Ask questions on [GitHub Discussions](https://github.com/FalkorDB/FalkorDB/discussions) - - - -### πŸ› Issue Tracker -Report bugs on [GitHub Issues](https://github.com/FalkorDB/FalkorDB/issues) - -
- ---- - -## Next Steps - -1. **[Get Started](/getting-started)** β€” Follow our step-by-step tutorial -2. **[Learn OpenCypher](/cypher)** β€” Master the query language -3. **[Explore GraphRAG](/genai-tools/graphrag-sdk)** β€” Build AI-powered applications -4. **[Deploy to Cloud](https://app.falkordb.cloud)** β€” Try our managed service - ---- - -## Open Source & License - -FalkorDB is open source under the [Server Side Public License v1 (SSPLv1)](https://github.com/FalkorDB/FalkorDB/blob/master/LICENSE.txt). - -⭐ [Star us on GitHub](https://github.com/FalkorDB/FalkorDB) | πŸ“– [Read the Blog](https://www.falkordb.com/blog) | 🌐 [Visit falkordb.com](https://www.falkordb.com) diff --git a/index.md b/index.md index e7c4b79..2c8f552 100644 --- a/index.md +++ b/index.md @@ -2,30 +2,34 @@ title: Home description: "The fastest way to your knowledge" --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; [![Trendshift](https://trendshift.io/api/badge/repositories/14787)](https://trendshift.io/repositories/14787) [![Docker Hub](https://img.shields.io/docker/pulls/falkordb/falkordb?label=Docker&style=flat-square)](https://hub.docker.com/r/falkordb/falkordb/) -[![Discord](https://img.shields.io/discord/1146782921294884966?style=flat-square)](https://discord.gg/ErBEqN9E) +[![Discord](https://img.shields.io/discord/1146782921294884966?style=flat-square)](https://discord.gg/ErBEqN9E) [![Try Free](https://img.shields.io/badge/Try%20Free-FalkorDB%20Cloud-FF8101?labelColor=FDE900&style=flat-square)](https://app.falkordb.cloud) ![FalkorDB Docs Readme Banner](https://github.com/user-attachments/assets/201b07e1-ac6d-4593-98cf-e58946d7766c) -# FalkorDB +## FalkorDB + ### The Graph platform developers use to achieve accurate GraphRAG for enterprise GenAI ### About FalkorDB -FalkorDB delivers an **accurate, multi-tenant RAG solution powered by a low-latency, scalable graph database technology.** + +FalkorDB delivers an **accurate, multi-tenant RAG solution powered by a low-latency, scalable graph database technology.** * Our solution is purpose-built for development teams working with complex, interconnected dataβ€”whether structured or unstructuredβ€”in real-time or interactive user environments. * The system supports the OpenCypher query language with proprietary enhancements that streamline interactions with graph data. Its efficient graph traversal and query capabilities make it well-suited for production environments. ### Choose Your Path -* **Graph Database Path:** If you're interested in using FalkorDB as a property graph database with OpenCypher support, continue with the sections below. -* **GraphRAG Path:** If you're aiming to implement advanced graph reasoning and generative AI tasks, explore our [GenAI Tools](/genai-tools) section, starting with the [GraphRAG SDK](/genai-tools/graphrag-sdk). + +* **Graph Database Path:** If you're interested in using FalkorDB as a property graph database with OpenCypher support, continue with the sections below. +* **GraphRAG Path:** If you're aiming to implement advanced graph reasoning and generative AI tasks, explore our [GenAI Tools](/genai-tools) section, starting with the [GraphRAG SDK](/genai-tools/graphrag-sdk). 
## Primary Features diff --git a/output.txt b/output.txt deleted file mode 100644 index d569769..0000000 --- a/output.txt +++ /dev/null @@ -1,753 +0,0 @@ -Found 125 files with duplicate H1 headings: - -================================================================================ - -File: References.md -Line: 9 -Heading: # References -Frontmatter Title: References --------------------------------------------------------------------------------- - -File: agentic-memory/cognee.md -Line: 9 -Heading: # Cognee -Frontmatter Title: Cognee --------------------------------------------------------------------------------- - -File: agentic-memory/graphiti.md -Line: 9 -Heading: # Graphiti -Frontmatter Title: Graphiti --------------------------------------------------------------------------------- - -File: agentic-memory/index.md -Line: 9 -Heading: # Agentic Memory -Frontmatter Title: Agentic Memory --------------------------------------------------------------------------------- - -File: algorithms/betweenness-centrality.md -Line: 8 -Heading: # Betweenness Centrality -Frontmatter Title: Betweenness Centrality --------------------------------------------------------------------------------- - -File: algorithms/bfs.md -Line: 8 -Heading: # BFS -Frontmatter Title: BFS --------------------------------------------------------------------------------- - -File: algorithms/cdlp.md -Line: 8 -Heading: # Community Detection using Label Propagation (CDLP) -Frontmatter Title: Community Detection using Label Propagation (CDLP) --------------------------------------------------------------------------------- - -File: algorithms/index.md -Line: 9 -Heading: # FalkorDB Algorithms Overview -Frontmatter Title: Algorithms --------------------------------------------------------------------------------- - -File: algorithms/msf.md -Line: 9 -Heading: # Minimum Spanning Forest (MSF) -Frontmatter Title: MSF --------------------------------------------------------------------------------- - -File: algorithms/pagerank.md -Line: 8 -Heading: # PageRank -Frontmatter Title: PageRank --------------------------------------------------------------------------------- - -File: algorithms/sppath.md -Line: 8 -Heading: # `algo.SPpaths` - Shortest Path (Single Pair) -Frontmatter Title: algo.SPpaths --------------------------------------------------------------------------------- - -File: algorithms/sspath.md -Line: 8 -Heading: # `algo.SSpaths` - Single Source Paths -Frontmatter Title: algo.SSpaths --------------------------------------------------------------------------------- - -File: algorithms/wcc.md -Line: 8 -Heading: # Weakly Connected Components (WCC) -Frontmatter Title: Weakly Connected Components (WCC) --------------------------------------------------------------------------------- - -File: cloud/features.md -Line: 9 -Heading: # Features -Frontmatter Title: Features --------------------------------------------------------------------------------- - -File: cloud/index.md -Line: 11 -Heading: # FalkorDB Cloud DBaaS -Frontmatter Title: Cloud DBaaS --------------------------------------------------------------------------------- - -File: commands/acl.md -Line: 9 -Heading: # ACL -Frontmatter Title: ACL --------------------------------------------------------------------------------- - -File: commands/graph.info.md -Line: 8 -Heading: # GRAPH.INFO -Frontmatter Title: GRAPH.INFO --------------------------------------------------------------------------------- - -File: commands/index.md -Line: 9 -Heading: # Commands -Frontmatter Title: 
Commands --------------------------------------------------------------------------------- - -File: cypher/call.md -Line: 9 -Heading: # CALL \{\} -Frontmatter Title: CALL --------------------------------------------------------------------------------- - -File: cypher/create.md -Line: 9 -Heading: # CREATE -Frontmatter Title: CREATE --------------------------------------------------------------------------------- - -File: cypher/cypher-support.md -Line: 9 -Heading: # Cypher coverage -Frontmatter Title: Cypher coverage --------------------------------------------------------------------------------- - -File: cypher/delete.md -Line: 9 -Heading: # DELETE -Frontmatter Title: DELETE --------------------------------------------------------------------------------- - -File: cypher/foreach.md -Line: 9 -Heading: # FOREACH -Frontmatter Title: FOREACH --------------------------------------------------------------------------------- - -File: cypher/functions.md -Line: 9 -Heading: # Functions -Frontmatter Title: Functions --------------------------------------------------------------------------------- - -File: cypher/indexing/index.md -Line: 9 -Heading: # Indexing -Frontmatter Title: Indexing --------------------------------------------------------------------------------- - -File: cypher/known-limitations.md -Line: 9 -Heading: # Known limitations -Frontmatter Title: Known limitations --------------------------------------------------------------------------------- - -File: cypher/limit.md -Line: 9 -Heading: # LIMIT -Frontmatter Title: LIMIT --------------------------------------------------------------------------------- - -File: cypher/load-csv.md -Line: 9 -Heading: # LOAD CSV -Frontmatter Title: LOAD CSV --------------------------------------------------------------------------------- - -File: cypher/match.md -Line: 9 -Heading: # MATCH -Frontmatter Title: MATCH --------------------------------------------------------------------------------- - -File: cypher/merge.md -Line: 9 -Heading: # MERGE -Frontmatter Title: MERGE --------------------------------------------------------------------------------- - -File: cypher/optional-match.md -Line: 9 -Heading: # OPTIONAL MATCH -Frontmatter Title: OPTIONAL MATCH --------------------------------------------------------------------------------- - -File: cypher/order-by.md -Line: 9 -Heading: # ORDER BY -Frontmatter Title: ORDER BY --------------------------------------------------------------------------------- - -File: cypher/procedures.md -Line: 9 -Heading: # Procedures -Frontmatter Title: Procedures --------------------------------------------------------------------------------- - -File: cypher/remove.md -Line: 9 -Heading: # REMOVE -Frontmatter Title: REMOVE --------------------------------------------------------------------------------- - -File: cypher/return.md -Line: 9 -Heading: # RETURN -Frontmatter Title: RETURN --------------------------------------------------------------------------------- - -File: cypher/set.md -Line: 9 -Heading: # SET -Frontmatter Title: SET --------------------------------------------------------------------------------- - -File: cypher/skip.md -Line: 9 -Heading: # SKIP -Frontmatter Title: SKIP --------------------------------------------------------------------------------- - -File: cypher/union.md -Line: 9 -Heading: # UNION -Frontmatter Title: UNION --------------------------------------------------------------------------------- - -File: cypher/unwind.md -Line: 9 -Heading: # UNWIND -Frontmatter Title: UNWIND 
--------------------------------------------------------------------------------- - -File: cypher/where.md -Line: 9 -Heading: # WHERE -Frontmatter Title: WHERE --------------------------------------------------------------------------------- - -File: cypher/with.md -Line: 9 -Heading: # WITH -Frontmatter Title: WITH --------------------------------------------------------------------------------- - -File: design/client-spec.md -Line: 8 -Heading: # Client Specification -Frontmatter Title: Client Specification --------------------------------------------------------------------------------- - -File: design/index.md -Line: 9 -Heading: # The FalkorDB Design -Frontmatter Title: The FalkorDB Design --------------------------------------------------------------------------------- - -File: design/result-structure.md -Line: 8 -Heading: # Result Set Structure -Frontmatter Title: Result Set Structure --------------------------------------------------------------------------------- - -File: design/third-party.md -Line: 8 -Heading: # Third-Party Components in FalkorDB -Frontmatter Title: Third Party --------------------------------------------------------------------------------- - -File: genai-tools/ag2.md -Line: 9 -Heading: # AG2 -Frontmatter Title: AG2 --------------------------------------------------------------------------------- - -File: genai-tools/graphrag-sdk.md -Line: 9 -Heading: # GraphRAG-SDK -Frontmatter Title: GraphRAG-SDK --------------------------------------------------------------------------------- - -File: genai-tools/graphrag-toolkit.md -Line: 9 -Heading: # GraphRAG Toolkit -Frontmatter Title: GraphRAG Toolkit --------------------------------------------------------------------------------- - -File: genai-tools/index.md -Line: 9 -Heading: # GenAI Tools -Frontmatter Title: GenAI Tools --------------------------------------------------------------------------------- - -File: genai-tools/langchain.md -Line: 9 -Heading: # LangChain -Frontmatter Title: LangChain --------------------------------------------------------------------------------- - -File: genai-tools/langgraph.md -Line: 9 -Heading: # LangGraph -Frontmatter Title: LangGraph --------------------------------------------------------------------------------- - -File: genai-tools/llamaindex.md -Line: 9 -Heading: # LlamaIndex -Frontmatter Title: LlamaIndex --------------------------------------------------------------------------------- - -File: getting-started/configuration.md -Line: 9 -Heading: # Configuration -Frontmatter Title: Configuration --------------------------------------------------------------------------------- - -File: index.mdx -Line: 19 -Heading: # FalkorDB -Frontmatter Title: FalkorDB Documentation --------------------------------------------------------------------------------- - -File: integration/bolt-support.md -Line: 9 -Heading: # [EXPERIMENTAL] BOLT protocol support for FalkorDB -Frontmatter Title: BOLT protocol support --------------------------------------------------------------------------------- - -File: integration/index.md -Line: 9 -Heading: # Integration -Frontmatter Title: Integration --------------------------------------------------------------------------------- - -File: integration/jena.md -Line: 9 -Heading: # Apache Jena Integration -Frontmatter Title: Apache Jena --------------------------------------------------------------------------------- - -File: integration/kafka-connect.md -Line: 9 -Heading: # Kafka Connect Sink -Frontmatter Title: Kafka Connect Sink 
--------------------------------------------------------------------------------- - -File: integration/rest.md -Line: 9 -Heading: # FalkorDB Browser REST API -Frontmatter Title: Rest API --------------------------------------------------------------------------------- - -File: integration/spring-data-falkordb.md -Line: 9 -Heading: # Spring Data FalkorDB -Frontmatter Title: Spring Data FalkorDB --------------------------------------------------------------------------------- - -File: license.md -Line: 9 -Heading: # FalkorDB License -Frontmatter Title: FalkorDB License --------------------------------------------------------------------------------- - -File: operations/cluster.md -Line: 9 -Heading: # Setting Up a FalkorDB Cluster -Frontmatter Title: Cluster --------------------------------------------------------------------------------- - -File: operations/docker.md -Line: 9 -Heading: # Running FalkorDB with Docker and Docker Compose -Frontmatter Title: Docker and Docker Compose --------------------------------------------------------------------------------- - -File: operations/falkordblite.md -Line: 9 -Heading: # FalkorDBLite -Frontmatter Title: FalkorDBLite --------------------------------------------------------------------------------- - -File: operations/index.md -Line: 9 -Heading: # Operations -Frontmatter Title: Operations --------------------------------------------------------------------------------- - -File: operations/k8s-support.md -Line: 9 -Heading: # Kubernetes support for FalkorDB -Frontmatter Title: Kubernetes support --------------------------------------------------------------------------------- - -File: operations/kubeblocks.md -Line: 9 -Heading: # Deploy FalkorDB with KubeBlocks -Frontmatter Title: KubeBlocks --------------------------------------------------------------------------------- - -File: operations/lightning-ai.md -Line: 9 -Heading: # Deploy FalkorDB on Lightning.AI -Frontmatter Title: Lightning.AI --------------------------------------------------------------------------------- - -File: operations/migration/index.md -Line: 9 -Heading: # Migration -Frontmatter Title: Migration --------------------------------------------------------------------------------- - -File: operations/migration/kuzu-to-falkordb.md -Line: 8 -Heading: # Kuzu to FalkorDB Migration -Frontmatter Title: Kuzu to FalkorDB --------------------------------------------------------------------------------- - -File: operations/migration/neo4j-to-falkordb.md -Line: 8 -Heading: # Neo4j to FalkorDB Migration -Frontmatter Title: Neo4j to FalkorDB --------------------------------------------------------------------------------- - -File: operations/migration/rdf-to-falkordb.md -Line: 8 -Heading: # RDF to FalkorDB Migration -Frontmatter Title: RDF to FalkorDB --------------------------------------------------------------------------------- - -File: operations/migration/redisgraph-to-falkordb.md -Line: 8 -Heading: # RedisGraph to FalkorDB Migration -Frontmatter Title: RedisGraph to FalkorDB --------------------------------------------------------------------------------- - -File: operations/persistence.md -Line: 9 -Heading: # Configuring FalkorDB Docker for Persistence -Frontmatter Title: Persistence --------------------------------------------------------------------------------- - -File: operations/replication.md -Line: 9 -Heading: # Configuring FalkorDB Docker for Replication -Frontmatter Title: Replication --------------------------------------------------------------------------------- - -File: 
udfs/flex/bitwise/and.md -Line: 7 -Heading: # bitwise.and -Frontmatter Title: bitwise.and --------------------------------------------------------------------------------- - -File: udfs/flex/bitwise/index.md -Line: 8 -Heading: # Bitwise Functions -Frontmatter Title: Bitwise Functions --------------------------------------------------------------------------------- - -File: udfs/flex/bitwise/not.md -Line: 7 -Heading: # bitwise.not -Frontmatter Title: bitwise.not --------------------------------------------------------------------------------- - -File: udfs/flex/bitwise/or.md -Line: 7 -Heading: # bitwise.or -Frontmatter Title: bitwise.or --------------------------------------------------------------------------------- - -File: udfs/flex/bitwise/shiftLeft.md -Line: 7 -Heading: # bitwise.shiftLeft -Frontmatter Title: bitwise.shiftLeft --------------------------------------------------------------------------------- - -File: udfs/flex/bitwise/shiftRight.md -Line: 7 -Heading: # bitwise.shiftRight -Frontmatter Title: bitwise.shiftRight --------------------------------------------------------------------------------- - -File: udfs/flex/bitwise/xor.md -Line: 7 -Heading: # bitwise.xor -Frontmatter Title: bitwise.xor --------------------------------------------------------------------------------- - -File: udfs/flex/collections/frequencies.md -Line: 7 -Heading: # coll.frequencies -Frontmatter Title: coll.frequencies --------------------------------------------------------------------------------- - -File: udfs/flex/collections/index.md -Line: 8 -Heading: # Collection Functions -Frontmatter Title: Collection Functions --------------------------------------------------------------------------------- - -File: udfs/flex/collections/intersection.md -Line: 7 -Heading: # coll.intersection -Frontmatter Title: coll.intersection --------------------------------------------------------------------------------- - -File: udfs/flex/collections/shuffle.md -Line: 7 -Heading: # coll.shuffle -Frontmatter Title: coll.shuffle --------------------------------------------------------------------------------- - -File: udfs/flex/collections/union.md -Line: 7 -Heading: # coll.union -Frontmatter Title: coll.union --------------------------------------------------------------------------------- - -File: udfs/flex/collections/zip.md -Line: 7 -Heading: # coll.zip -Frontmatter Title: coll.zip --------------------------------------------------------------------------------- - -File: udfs/flex/date/format.md -Line: 7 -Heading: # date.format -Frontmatter Title: date.format --------------------------------------------------------------------------------- - -File: udfs/flex/date/index.md -Line: 8 -Heading: # Date Functions -Frontmatter Title: Date Functions --------------------------------------------------------------------------------- - -File: udfs/flex/date/parse.md -Line: 7 -Heading: # date.parse -Frontmatter Title: date.parse --------------------------------------------------------------------------------- - -File: udfs/flex/date/toTimeZone.md -Line: 7 -Heading: # date.toTimeZone -Frontmatter Title: date.toTimeZone --------------------------------------------------------------------------------- - -File: udfs/flex/date/truncate.md -Line: 7 -Heading: # date.truncate -Frontmatter Title: date.truncate --------------------------------------------------------------------------------- - -File: udfs/flex/index.md -Line: 7 -Heading: # FLEX Function Reference -Frontmatter Title: FLEX Function Reference 
--------------------------------------------------------------------------------- - -File: udfs/flex/json/fromJsonList.md -Line: 7 -Heading: # json.fromJsonList -Frontmatter Title: json.fromJsonList --------------------------------------------------------------------------------- - -File: udfs/flex/json/fromJsonMap.md -Line: 7 -Heading: # json.fromJsonMap -Frontmatter Title: json.fromJsonMap --------------------------------------------------------------------------------- - -File: udfs/flex/json/index.md -Line: 8 -Heading: # JSON Functions -Frontmatter Title: JSON Functions --------------------------------------------------------------------------------- - -File: udfs/flex/json/toJson.md -Line: 7 -Heading: # json.toJson -Frontmatter Title: json.toJson --------------------------------------------------------------------------------- - -File: udfs/flex/map/fromPairs.md -Line: 7 -Heading: # map.fromPairs -Frontmatter Title: map.fromPairs --------------------------------------------------------------------------------- - -File: udfs/flex/map/index.md -Line: 8 -Heading: # Map Functions -Frontmatter Title: Map Functions --------------------------------------------------------------------------------- - -File: udfs/flex/map/merge.md -Line: 7 -Heading: # map.merge -Frontmatter Title: map.merge --------------------------------------------------------------------------------- - -File: udfs/flex/map/removeKey.md -Line: 7 -Heading: # map.removeKey -Frontmatter Title: map.removeKey --------------------------------------------------------------------------------- - -File: udfs/flex/map/removeKeys.md -Line: 7 -Heading: # map.removeKeys -Frontmatter Title: map.removeKeys --------------------------------------------------------------------------------- - -File: udfs/flex/map/submap.md -Line: 7 -Heading: # map.submap -Frontmatter Title: map.submap --------------------------------------------------------------------------------- - -File: udfs/flex/similarity/index.md -Line: 8 -Heading: # Similarity Functions -Frontmatter Title: Similarity Functions --------------------------------------------------------------------------------- - -File: udfs/flex/similarity/jaccard.md -Line: 7 -Heading: # sim.jaccard -Frontmatter Title: sim.jaccard --------------------------------------------------------------------------------- - -File: udfs/flex/text/camelCase.md -Line: 7 -Heading: # text.camelCase -Frontmatter Title: text.camelCase --------------------------------------------------------------------------------- - -File: udfs/flex/text/capitalize.md -Line: 7 -Heading: # text.capitalize -Frontmatter Title: text.capitalize --------------------------------------------------------------------------------- - -File: udfs/flex/text/decapitalize.md -Line: 7 -Heading: # text.decapitalize -Frontmatter Title: text.decapitalize --------------------------------------------------------------------------------- - -File: udfs/flex/text/format.md -Line: 7 -Heading: # text.format -Frontmatter Title: text.format --------------------------------------------------------------------------------- - -File: udfs/flex/text/index.md -Line: 8 -Heading: # Text Functions -Frontmatter Title: Text Functions --------------------------------------------------------------------------------- - -File: udfs/flex/text/indexOf.md -Line: 7 -Heading: # text.indexOf -Frontmatter Title: text.indexOf --------------------------------------------------------------------------------- - -File: udfs/flex/text/indexesOf.md -Line: 7 -Heading: # text.indexesOf -Frontmatter 
Title: text.indexesOf --------------------------------------------------------------------------------- - -File: udfs/flex/text/jaroWinkler.md -Line: 7 -Heading: # text.jaroWinkler -Frontmatter Title: text.jaroWinkler --------------------------------------------------------------------------------- - -File: udfs/flex/text/join.md -Line: 7 -Heading: # text.join -Frontmatter Title: text.join --------------------------------------------------------------------------------- - -File: udfs/flex/text/levenshtein.md -Line: 7 -Heading: # text.levenshtein -Frontmatter Title: text.levenshtein --------------------------------------------------------------------------------- - -File: udfs/flex/text/lpad.md -Line: 7 -Heading: # text.lpad -Frontmatter Title: text.lpad --------------------------------------------------------------------------------- - -File: udfs/flex/text/regexGroups.md -Line: 7 -Heading: # text.regexGroups -Frontmatter Title: text.regexGroups --------------------------------------------------------------------------------- - -File: udfs/flex/text/repeat.md -Line: 7 -Heading: # text.repeat -Frontmatter Title: text.repeat --------------------------------------------------------------------------------- - -File: udfs/flex/text/replace.md -Line: 7 -Heading: # text.replace -Frontmatter Title: text.replace --------------------------------------------------------------------------------- - -File: udfs/flex/text/rpad.md -Line: 7 -Heading: # text.rpad -Frontmatter Title: text.rpad --------------------------------------------------------------------------------- - -File: udfs/flex/text/snakeCase.md -Line: 7 -Heading: # text.snakeCase -Frontmatter Title: text.snakeCase --------------------------------------------------------------------------------- - -File: udfs/flex/text/swapCase.md -Line: 7 -Heading: # text.swapCase -Frontmatter Title: text.swapCase --------------------------------------------------------------------------------- - -File: udfs/flex/text/upperCamelCase.md -Line: 7 -Heading: # text.upperCamelCase -Frontmatter Title: text.upperCamelCase --------------------------------------------------------------------------------- - -File: udfs/index.md -Line: 8 -Heading: # UDFs -Frontmatter Title: UDFs --------------------------------------------------------------------------------- diff --git a/website/docs/References.md b/website/docs/References.md deleted file mode 100644 index a099243..0000000 --- a/website/docs/References.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: References -description: Learn more about the Technology behind FalkorDB -sidebar_position: 998 -sidebar_label: References ---- - - - -# References -* [FalkorDB Blog](https://www.falkordb.com/blog) - -* Video - - [Building a Multi-dimensional Analytics Engine with RedisGraph](https://www.youtube.com/watch?v=6FYYn-9fPXE) - - [A Practical Introduction to RedisGraph](https://www.youtube.com/watch?v=aGHALjV6JGc) - - [Redis Graph with Roi Lipman](https://www.youtube.com/watch?v=HpEa2cftbnc) - - [RedisGraph 2.2: The Fastest Way to Query Your Highly Connected Data in Redis](https://www.youtube.com/watch?v=JNpHba2kRGM) - -* Slides - - [RedisGraph A Low Latency Graph DB](https://www.slideshare.net/RedisLabs/redisgraph-a-low-latency-graph-db-pieter-cailliau) - -* Article - [RedisGraph GraphBLAS Enabled Graph Database](https://arxiv.org/abs/1905.01294). -Cailliau, Pieter & Davis, Tim & Gadepally, Vijay & Kepner, Jeremy & Lipman, Roi & Lovitz, Jeffrey & Ouaknine, Keren (IEEE IPDPS 2019 GrAPL workshop). 
-([pdf](http://www.mit.edu/~kepner/NEDB2019/NEDB2019-RedisGraph-NEDB.pdf)) - -* Blog - - - [RedisGraph 2.0 Boosts Performance Up to 6x](https://redis.com/blog/redisgraph-2-0-boosts-performance-up-to-6x/) - - [Investigating RedisGraph](https://phpscaling.com/2018/12/06/investigating-redisgraph/). [alister](https://phpscaling.com/author/alister/) - - [Getting Started with Knowledge Graphs in RedisGraph](https://redis.com/blog/getting-started-with-knowledge-graphs-in-redisgraph/) - - [Introducing RedisGraph 2.0](https://redis.com/blog/introducing-redisgraph-2-0/) - - [RedisGraph Elixir client](https://flynn.gg/blog/redisgraph-elixir/). [Christopher Flynn](https://flynn.gg/) - - [What’s New in RedisGraph 1.2.0](https://redis.com/blog/whats-new-redisgraph-1-2-0/). [Roi Lipman](https://redis.com/author/roi/). - - [Benchmarking RedisGraph 1.0](https://redis.com/blog/new-redisgraph-1-0-achieves-600x-faster-performance-graph-databases/). [Pieter Cailliau](https://redis.com/author/pcailliau/). - -* [Development Tutorial](https://developer.redis.com/howtos/redisgraph/) - diff --git a/website/docs/agentic-memory/cognee.md b/website/docs/agentic-memory/cognee.md deleted file mode 100644 index 7b68876..0000000 --- a/website/docs/agentic-memory/cognee.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: Cognee -description: Build flexible agentic memory with Cognee and FalkorDB -sidebar_position: 2 -sidebar_label: Cognee ---- - - - -# Cognee -[Cognee](https://github.com/topoteretes/cognee) is a memory management framework for AI agents that provides a flexible approach to storing and retrieving knowledge. It combines graph database capabilities with vector storage to create rich, context-aware memory systems. - -## Overview - -Cognee provides a comprehensive memory layer that: -- **Manages complex knowledge structures**: Store entities, relationships, and contextual information -- **Supports hybrid storage**: Combine graph databases with vector stores for optimal retrieval -- **Enables flexible querying**: Search by semantic similarity, graph relationships, or both -- **Scales with your needs**: From simple chatbots to complex multi-agent systems - -## Why Cognee + FalkorDB? 
- -### FalkorDB's Added Value - -- **Native Graph Storage**: Efficient storage and traversal of entity relationships -- **Fast Queries**: Quick retrieval of connected information for context building -- **Flexible Schema**: Adapt to evolving knowledge structures without rigid schemas -- **Production Ready**: Scale from development to production seamlessly -- **Hybrid Capabilities**: Combine graph traversal with vector similarity search - -### Use Cases - -- **Conversational AI**: Build chatbots that remember and learn from past conversations -- **Knowledge Management**: Create organizational memory that captures relationships and context -- **Recommendation Systems**: Leverage connection patterns for personalized recommendations -- **Research Assistants**: Help AI agents navigate and understand complex information networks -- **Customer Support**: Provide context-aware responses based on customer history and relationships - -## Getting Started - -### Prerequisites - -- Python 3.10 or higher -- FalkorDB instance (Cloud or self-hosted) -- API keys for LLM and embedding providers (if using those features) - -### Installation - -Install Cognee with the FalkorDB community adapter: - -```bash -pip install cognee -pip install cognee-community-hybrid-adapter-falkor -``` - -### Quick Start Example - -Here's a complete example to get you started with Cognee and FalkorDB: - -```python -import asyncio -import os -import pathlib -from os import path -from cognee import config, prune, add, cognify, search, SearchType - -# Import the register module to enable FalkorDB support -import cognee_community_hybrid_adapter_falkor.register - -async def main(): - # Set up local directories - system_path = pathlib.Path(__file__).parent - config.system_root_directory(path.join(system_path, ".cognee_system")) - config.data_root_directory(path.join(system_path, ".cognee_data")) - - # Configure relational database - config.set_relational_db_config({ - "db_provider": "sqlite", - }) - - # Configure FalkorDB as both vector and graph database - config.set_vector_db_config({ - "vector_db_provider": "falkordb", - "vector_db_url": os.getenv("GRAPH_DB_URL", "localhost"), - "vector_db_port": int(os.getenv("GRAPH_DB_PORT", "6379")), - }) - config.set_graph_db_config({ - "graph_database_provider": "falkordb", - "graph_database_url": os.getenv("GRAPH_DB_URL", "localhost"), - "graph_database_port": int(os.getenv("GRAPH_DB_PORT", "6379")), - }) - - # Optional: Clean previous data - await prune.prune_data() - await prune.prune_system() - - # Add and process your content - text_data = """ - Sarah is a software engineer at TechCorp. She specializes in machine learning - and has been working on implementing graph-based recommendation systems. - Sarah recently collaborated with Mike on a new project using FalkorDB. - Mike is the lead data scientist at TechCorp. - """ - - await add(text_data) - await cognify() - - # Search using graph completion - search_results = await search( - query_type=SearchType.GRAPH_COMPLETION, - query_text="What does Sarah work on?" - ) - - print("Search Results:") - for result in search_results: - print("\n" + result) - -# Run the example -asyncio.run(main()) -``` - -### Understanding the Code - -1. **Import the FalkorDB Adapter**: Import `cognee_community_hybrid_adapter_falkor.register` to enable FalkorDB support -2. **Configure Directories**: Set up local directories for Cognee's system and data storage -3. **Configure Databases**: Set FalkorDB as both the vector and graph database for hybrid capabilities -4. 
**Add Data**: Provide text or structured data to be processed -5. **Cognify**: Process the data to extract entities and relationships -6. **Search**: Query the knowledge using different search types (graph completion, similarity, etc.) - -## Advanced Features - -### Search Types - -Cognee supports different search types for various use cases: - -```python -from cognee import search, SearchType - -# Graph completion search - uses graph structure for context -graph_results = await search( - query_type=SearchType.GRAPH_COMPLETION, - query_text="machine learning projects" -) - -# Similarity search - semantic vector search -similarity_results = await search( - query_type=SearchType.SIMILARITY, - query_text="machine learning projects" -) - -# Insights search - combines multiple approaches -insights_results = await search( - query_type=SearchType.INSIGHTS, - query_text="machine learning projects" -) -``` - -### LLM Configuration - -Configure the LLM provider for entity extraction and processing: - -```python -import os -from cognee import config - -# Set LLM API key -os.environ["LLM_API_KEY"] = "your-openai-api-key" - -# Configure LLM provider -config.set_llm_config({ - "llm_provider": "openai", - "llm_model": "gpt-4", - "llm_temperature": 0.7 -}) -``` - -### Managing Knowledge - -```python -from cognee import add, cognify, prune - -# Add multiple documents -documents = [ - "Natural language processing is a subfield of AI.", - "Machine learning models require training data.", - "Graph databases excel at relationship queries." -] - -for doc in documents: - await add(doc) - -await cognify() - -# Reset memory (clear all data) -await prune.prune_data() -await prune.prune_system() -``` - -### Environment Variables - -You can use environment variables for configuration: - -```bash -export GRAPH_DB_URL="localhost" -export GRAPH_DB_PORT="6379" -export LLM_API_KEY="your-openai-api-key" -``` - -Then access them in your code: - -```python -import os -from cognee import config - -config.set_graph_db_config({ - "graph_database_provider": "falkordb", - "graph_database_url": os.getenv("GRAPH_DB_URL", "localhost"), - "graph_database_port": int(os.getenv("GRAPH_DB_PORT", "6379")), -}) -``` - -## Configuration Options - -### Database Configuration - -```python -from cognee import config - -# Relational database (for metadata) -config.set_relational_db_config({ - "db_provider": "sqlite", # or "postgres" -}) - -# FalkorDB as graph database -config.set_graph_db_config({ - "graph_database_provider": "falkordb", - "graph_database_url": "localhost", - "graph_database_port": 6379, -}) - -# FalkorDB as vector database (hybrid mode) -config.set_vector_db_config({ - "vector_db_provider": "falkordb", - "vector_db_url": "localhost", - "vector_db_port": 6379, -}) -``` - -### LLM Configuration - -```python -import os -from cognee import config - -# Set API key via environment variable -os.environ["LLM_API_KEY"] = "your-openai-api-key" - -# Configure LLM -config.set_llm_config({ - "llm_provider": "openai", - "llm_model": "gpt-4", - "llm_temperature": 0.7 -}) -``` - -## Best Practices - -1. **Import Registration First**: Always import `cognee_community_hybrid_adapter_falkor.register` before configuring Cognee -2. **Use Environment Variables**: Store connection details and API keys in environment variables -3. **Batch Processing**: Add multiple documents before calling `cognify()` for better performance -4. **Clean Up**: Use `prune.prune_data()` and `prune.prune_system()` to reset when needed -5. 
**Hybrid Mode**: Configure FalkorDB as both vector and graph database for optimal search capabilities -6. **Monitor Resources**: Track FalkorDB memory usage and query performance as your knowledge base grows - -## Integration Patterns - -### With LangChain - -```python -from cognee import add, cognify, search, SearchType - -# Use Cognee as a knowledge base for LangChain -async def get_context(query): - results = await search( - query_type=SearchType.GRAPH_COMPLETION, - query_text=query - ) - return results - -# Integrate with your LangChain application -context = await get_context("previous conversations about AI") -``` - -### Adding Multiple Documents - -```python -from cognee import add, cognify - -# Add documents to Cognee -documents = [ - "Your first document content...", - "Your second document content...", - "Your third document content..." -] - -for doc in documents: - await add(doc) - -await cognify() -``` - -## Troubleshooting - -### Installation Issues - -If you have trouble installing the community adapter: -- Ensure you have the correct package name: `cognee-community-hybrid-adapter-falkor` -- Check that you're using Python 3.10 or higher -- Try installing in a fresh virtual environment - -### Connection Issues - -If you experience connection problems: -- Verify FalkorDB is running: `redis-cli -h localhost -p 6379 ping` -- Check the `GRAPH_DB_URL` and `GRAPH_DB_PORT` environment variables -- Ensure FalkorDB is accessible on the specified host and port - -### Data Not Appearing in Graph - -- Make sure to import `cognee_community_hybrid_adapter_falkor.register` before using Cognee -- Call `await cognify()` after adding data to process and extract entities -- Check that your LLM API key is set correctly -- Verify the graph is being populated using FalkorDB CLI or Browser - -### Performance Issues - -- Consider batching operations for large datasets -- Monitor graph size with `GRAPH.MEMORY USAGE` command -- Clean up old data periodically using `prune.prune_data()` - -## Resources - -- πŸ“š [Cognee Documentation](https://github.com/topoteretes/cognee-community) -- πŸ’» [Cognee GitHub Repository](https://github.com/topoteretes/cognee) -- πŸ”— [FalkorDB Integration Guide](https://github.com/topoteretes/cognee-community/blob/main/packages/hybrid/falkordb/README.md) -- πŸ“– [Cognee Examples](https://github.com/topoteretes/cognee/tree/main/examples) - -## Next Steps - -- Explore [Graphiti](./graphiti.md) for temporal knowledge graph capabilities -- Learn about [GenAI Tools](/genai-tools) for graph reasoning and LLM integrations -- Review [Cypher Query Language](/cypher) for custom graph queries diff --git a/website/docs/agentic-memory/graphiti-mcp-server.md b/website/docs/agentic-memory/graphiti-mcp-server.md deleted file mode 100644 index 5cdf2dd..0000000 --- a/website/docs/agentic-memory/graphiti-mcp-server.md +++ /dev/null @@ -1,637 +0,0 @@ ---- -title: Graphiti MCP Server -description: Run Graphiti MCP server with FalkorDB for AI agent memory in Claude Desktop and other MCP clients -sidebar_position: 3 -sidebar_label: Graphiti MCP Server ---- - - -# Graphiti MCP Server -Graphiti is a framework for building and querying temporally-aware knowledge graphs, specifically tailored for AI agents operating in dynamic environments. The Graphiti MCP (Model Context Protocol) Server enables AI clients like Claude Desktop, Cursor IDE, and other MCP-compatible applications to interact with FalkorDB-powered knowledge graphs for persistent agent memory. 
This allows AI assistants to store and retrieve information across conversations, building a rich, contextual memory over time. - -## What is MCP? - -The [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) is an open standard that enables AI applications to connect to external data sources and tools. The Graphiti MCP Server implements this protocol to provide AI agents with access to graph-based knowledge storage powered by FalkorDB. - -## Features - -The Graphiti MCP Server provides comprehensive knowledge graph capabilities: - -- **Episode Management**: Add, retrieve, and delete episodes (text, messages, or JSON data) -- **Entity Management**: Search and manage entity nodes and relationships in the knowledge graph -- **Search Capabilities**: Search for facts (edges) and node summaries using semantic and hybrid search -- **Group Management**: Organize and manage groups of related data with group_id filtering -- **Graph Maintenance**: Clear the graph and rebuild indices -- **Multiple LLM Providers**: Support for OpenAI, Anthropic, Gemini, Groq, and Azure OpenAI -- **Multiple Embedding Providers**: Support for OpenAI, Voyage, Sentence Transformers, and Gemini embeddings -- **Rich Entity Types**: Built-in entity types including Preferences, Requirements, Procedures, Locations, Events, Organizations, Documents, and more for structured knowledge extraction -- **HTTP Transport**: Default HTTP transport with MCP endpoint at `/mcp/` for broad client compatibility -- **Queue-based Processing**: Asynchronous episode processing with configurable concurrency limits - -## Prerequisites - -Before you begin, ensure you have: - -- Docker and Docker Compose installed on your system -- At least one LLM provider API key: - - OpenAI API key (recommended) - - Or Anthropic, Gemini, Groq, or Azure OpenAI API key -- (Optional) Python 3.10+ if running the MCP server standalone with an external FalkorDB instance - -## Quick Start with Docker Compose - -The easiest way to run the Graphiti MCP Server with FalkorDB is using the official Docker Compose configuration from [Zep's Graphiti repository](https://github.com/getzep/graphiti/tree/main/mcp_server). - -### Option 1: Combined Image (Recommended) - -This setup uses a single container that includes both FalkorDB and the MCP server. - -1. **Create a directory for your setup:** - -```bash -mkdir graphiti-mcp && cd graphiti-mcp -``` - -1. **Download the docker-compose configuration:** - -```bash -curl -O https://raw.githubusercontent.com/getzep/graphiti/main/mcp_server/docker/docker-compose.yml -``` - -1. **Create a `.env` file with your API key:** - -```env -OPENAI_API_KEY=your-openai-api-key -FALKORDB_PASSWORD= -GRAPHITI_GROUP_ID=main -``` - -1. **Start the services:** - -```bash -docker-compose up -d -``` - -The combined image will start both FalkorDB and the MCP server in a single container, accessible at: - -- **FalkorDB (Redis):** `localhost:6379` -- **FalkorDB Browser UI:** `http://localhost:3000` -- **MCP Server HTTP endpoint:** `http://localhost:8000/mcp/` -- **Health check:** `http://localhost:8000/health` - -### Option 2: Separate Containers - -For more flexibility, you can run FalkorDB and the MCP server in separate containers. - -1. **Create a directory and download the configuration:** - -```bash -mkdir graphiti-mcp && cd graphiti-mcp -curl -O https://raw.githubusercontent.com/getzep/graphiti/main/mcp_server/docker/docker-compose-falkordb.yml -mv docker-compose-falkordb.yml docker-compose.yml -``` - -1. 
**Create a `.env` file:** - -```env -OPENAI_API_KEY=your-openai-api-key -FALKORDB_URI=redis://falkordb:6379 -FALKORDB_PASSWORD= -FALKORDB_DATABASE=default_db -GRAPHITI_GROUP_ID=main -``` - -1. **Start the services:** - -```bash -docker-compose up -d -``` - -This configuration starts FalkorDB and the MCP server as separate containers with the same accessible ports as the combined image. - -## Manual Docker Setup (Alternative) - -If you prefer to run containers manually without Docker Compose, you can use the standalone MCP server image: - -### Step 1: Run FalkorDB - -```bash -docker run -d \ - --name falkordb \ - -p 6379:6379 \ - -p 3000:3000 \ - falkordb/falkordb:latest -``` - -### Step 2: Run the MCP Server - -```bash -docker run -d \ - --name graphiti-mcp \ - -e OPENAI_API_KEY="your-openai-api-key" \ - -e FALKORDB_URI="redis://host.docker.internal:6379" \ - -e FALKORDB_PASSWORD="" \ - -e FALKORDB_DATABASE="default_db" \ - -e GRAPHITI_GROUP_ID="main" \ - -p 8000:8000 \ - zepai/knowledge-graph-mcp:standalone -``` - -**Note**: Use `host.docker.internal` as the hostname to allow the container to access FalkorDB running on your host machine. - -## Configuration - -The Graphiti MCP server can be configured using environment variables in a `.env` file or through a `config.yaml` file. - -### Default Configuration - -The MCP server comes with sensible defaults: - -- **Transport**: HTTP (accessible at `http://localhost:8000/mcp/`) -- **Database**: FalkorDB (combined in single container with MCP server) -- **LLM**: OpenAI with model gpt-4o-mini -- **Embedder**: OpenAI text-embedding-3-small - -### LLM Provider Configuration - -The server supports multiple LLM providers. Set the appropriate API key in your `.env` file: - -**OpenAI (default)**: - -```env -OPENAI_API_KEY=sk-proj-your-key-here -``` - -**Anthropic**: - -```env -ANTHROPIC_API_KEY=your-anthropic-key -``` - -**Google Gemini**: - -```env -GOOGLE_API_KEY=your-google-key -``` - -**Groq**: - -```env -GROQ_API_KEY=your-groq-key -``` - -**Azure OpenAI**: - -```env -AZURE_OPENAI_API_KEY=your-azure-key -AZURE_OPENAI_ENDPOINT=your-endpoint-url -AZURE_OPENAI_DEPLOYMENT=your-deployment-name -``` - -### Environment Variables - -Key environment variables for the MCP server: - -| Variable | Description | Default | Required | Example | -| -------- | ----------- | ------- | -------- | ------- | -| `OPENAI_API_KEY` | OpenAI API key (or use another LLM provider) | - | Yes* | `sk-proj-...` | -| `FALKORDB_URI` | FalkorDB connection URI | `redis://localhost:6379` | No | `redis://falkordb:6379` | -| `FALKORDB_PASSWORD` | FalkorDB password (if authentication enabled) | - | No | `your-password` | -| `FALKORDB_DATABASE` | Database name | `default_db` | No | `default_db` | -| `SEMAPHORE_LIMIT` | Episode processing concurrency limit | `10` | No | `10` | -| `BROWSER` | Enable FalkorDB Browser UI (combined image) | `1` | No | `1` | - -*At least one LLM provider API key is required - -### Concurrency and Rate Limits - -The `SEMAPHORE_LIMIT` controls how many episodes can be processed simultaneously. Adjust based on your LLM provider tier: - -- **OpenAI Tier 1 (free)**: `SEMAPHORE_LIMIT=1-2` -- **OpenAI Tier 2**: `SEMAPHORE_LIMIT=5-8` -- **OpenAI Tier 3**: `SEMAPHORE_LIMIT=10-15` -- **OpenAI Tier 4**: `SEMAPHORE_LIMIT=20-50` -- **Anthropic default**: `SEMAPHORE_LIMIT=5-8` - -If you see 429 rate limit errors, reduce the value. Monitor your LLM provider's dashboard for actual request rates. 
- -### FalkorDB Cloud Configuration - -To use FalkorDB Cloud with the MCP server, update your `.env` file: - -```env -OPENAI_API_KEY=your-openai-api-key -FALKORDB_URI=redis://your-instance.falkordb.cloud:6379 -FALKORDB_PASSWORD=your-cloud-password -FALKORDB_DATABASE=default_db -GRAPHITI_GROUP_ID=main -``` - -Then use the docker-compose configuration with the separate containers option (docker-compose-falkordb.yml), as it's designed for external database connections. - -## Client Integration - -### Claude Desktop - -Configure Claude Desktop to use the Graphiti MCP server by editing your Claude Desktop configuration file: - -**macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json` - -**Windows**: `%APPDATA%\Claude\claude_desktop_config.json` - -Add the following configuration: - -```json -{ - "mcpServers": { - "graphiti-memory": { - "transport": "http", - "url": "http://localhost:8000/mcp/" - } - } -} -``` - -**Note**: The MCP server uses HTTP transport by default with the endpoint at `/mcp/`. The `OPENAI_API_KEY` is already configured in the MCP server's Docker environment, so you don't need to specify it again here. - -**Alternative (stdio transport)**: If you have the Graphiti repository cloned locally and Python installed, you can use stdio transport for better integration with some clients. See the [official Graphiti documentation](https://github.com/getzep/graphiti/blob/main/mcp_server/README.md#integrating-with-mcp-clients) for stdio configuration details. - -**After configuration**: - -1. Restart Claude Desktop to apply the changes -2. Look for the MCP server indicator in Claude's interface -3. Claude will now have access to persistent memory through the knowledge graph - -### Cursor IDE and VS Code - -For Cursor IDE and VS Code with GitHub Copilot, add the MCP server configuration: - -**Cursor IDE**: Add to Cursor settings - -**VS Code**: Add to `.vscode/mcp.json` or global settings - -```json -{ - "mcpServers": { - "graphiti-memory": { - "uri": "http://localhost:8000/mcp/", - "transport": { - "type": "http" - } - } - } -} -``` - -### Testing the Connection - -Once configured, test the connection with these steps: - -1. **Restart your AI client** (Claude Desktop or Cursor) -2. **Look for the MCP indicator** in your client's interface -3. **Test with a simple prompt**: - - ```text - "Remember that my favorite programming language is Python" - ``` - - The AI should confirm it has stored this information. - -4. **Verify the memory**: - - ```text - "What do you remember about my programming language preferences?" - ``` - - The AI should respond with "Python" or reference your previous statement. - -5. **Check the graph** (optional): - - Open [http://localhost:3000](http://localhost:3000) in your browser - - Connect to the database - - Run: `MATCH (n) RETURN n LIMIT 10` - - You should see nodes representing the stored information - -**More example prompts**: - -- "Store this fact: I'm working on a project called MyApp" -- "What projects am I working on?" -- "Remember that I prefer dark mode in my IDE" - -The AI will use the Graphiti MCP server to store and retrieve this information from the FalkorDB knowledge graph. 
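If you prefer to verify the setup from a script instead of the Browser UI, the sketch below checks the health endpoint and counts the nodes Graphiti has written to the `graphiti_memory` graph. It assumes the `falkordb` Python package and the default ports used above.

```python
import urllib.request

from falkordb import FalkorDB

# 1. Confirm the MCP server is up via its health endpoint.
with urllib.request.urlopen("http://localhost:8000/health") as response:
    print("MCP server health status:", response.status)  # expect 200

# 2. Connect to FalkorDB and inspect the graph the MCP server writes to.
db = FalkorDB(host="localhost", port=6379)
graph = db.select_graph("graphiti_memory")

# After a few memory prompts, this count should be greater than zero.
result = graph.query("MATCH (n) RETURN count(n) AS nodes")
print("Nodes in graphiti_memory:", result.result_set[0][0])
```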
- -## Available Tools - -The Graphiti MCP server exposes the following tools to AI clients: - -- **`add_episode`**: Add an episode to the knowledge graph (supports text, JSON, and message formats) -- **`search_nodes`**: Search the knowledge graph for relevant node summaries -- **`search_facts`**: Search the knowledge graph for relevant facts (edges between entities) -- **`delete_entity_edge`**: Delete an entity edge from the knowledge graph -- **`delete_episode`**: Delete an episode from the knowledge graph -- **`get_entity_edge`**: Get an entity edge by its UUID -- **`get_episodes`**: Get the most recent episodes for a specific group -- **`clear_graph`**: Clear all data from the knowledge graph and rebuild indices - -### Entity Types - -Graphiti MCP Server includes built-in entity types for structured knowledge extraction. The MCP server automatically uses these entity types during episode ingestion to extract and structure information from conversations and documents. - -**Available Entity Types:** - -- **Preference**: User preferences, choices, opinions, or selections (prioritized for user-specific information) -- **Requirement**: Specific needs, features, or functionality that must be fulfilled -- **Procedure**: Standard operating procedures and sequential instructions -- **Location**: Physical or virtual places where activities occur -- **Event**: Time-bound activities, occurrences, or experiences -- **Organization**: Companies, institutions, groups, or formal entities -- **Document**: Information content in various forms (books, articles, reports, videos, etc.) -- **Topic**: Subject of conversation, interest, or knowledge domain (used as a fallback) -- **Object**: Physical items, tools, devices, or possessions (used as a fallback) - -These entity types can be customized in the `config.yaml` file if you're running the MCP server from source. - -### Graph Schema - -The Graphiti MCP server stores information in FalkorDB using the following schema: - -**Node Types**: - -- **`Entity`**: Represents people, places, things, or concepts - - Properties: `name`, `entity_type`, `summary` -- **`Episode`**: Represents events or pieces of information - - Properties: `name`, `content`, `timestamp`, `source` - -**Relationship Types**: - -- **`RELATES_TO`**: Connects entities that are related -- **`MENTIONED_IN`**: Links entities to episodes where they appear -- **`OCCURRED_AFTER`**: Creates temporal ordering between episodes - -### Graph Name - -All data is stored in a graph named `graphiti_memory` - -## Advanced Usage - -### Programmatic Access - -> **⚠️ Important**: The Graphiti MCP server is designed to be used by MCP clients (like Claude Desktop or Cursor) via the HTTP transport protocol. It does **not** expose direct REST API endpoints outside of the MCP protocol. - -The server exposes: - -- `/mcp/` - HTTP MCP protocol endpoint -- `/health` - Health check endpoint - -To interact with the Graphiti knowledge graph programmatically, you have two options: - -#### Option 1: Use an MCP Client Library - -Use an MCP client library that implements the MCP protocol to communicate with the server via HTTP. This is the intended way to interact with the server programmatically. - -#### Option 2: Access FalkorDB Directly - -Connect directly to FalkorDB to query the knowledge graph. See the "Custom Graph Queries" section below for details. 
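For Option 1, the snippet below is a minimal sketch of a programmatic client. It assumes the official MCP Python SDK (`pip install mcp`) and its streamable HTTP client, which are not covered by this guide, so verify the exact imports and signatures against the SDK documentation. The sketch connects to the endpoint exposed above and lists the available tools.

```python
import asyncio

# Assumption: the official MCP Python SDK ("mcp" package); check its docs
# for the exact client API before relying on this sketch.
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

MCP_URL = "http://localhost:8000/mcp/"  # Graphiti MCP server endpoint

async def main():
    async with streamablehttp_client(MCP_URL) as (read_stream, write_stream, _):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            tools = await session.list_tools()
            print("Available tools:", [tool.name for tool in tools.tools])

asyncio.run(main())
```

For Option 2, the next section shows how to query FalkorDB directly.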
- -### Custom Graph Queries - -For advanced users, you can connect directly to FalkorDB and run custom Cypher queries on the knowledge graph: - -```python -from falkordb import FalkorDB - -# Connect to FalkorDB -db = FalkorDB(host='localhost', port=6379) - -# Select the Graphiti memory graph -graph = db.select_graph('graphiti_memory') - -# Example 1: Find all entities related to a specific entity -query = """ -MATCH (e:Entity)-[r:RELATES_TO]->(e2:Entity) -WHERE e.name CONTAINS 'John' -RETURN e.name AS entity, type(r) AS relationship, e2.name AS related_entity -LIMIT 10 -""" - -result = graph.query(query) -for record in result.result_set: - print(f"{record[0]} -> {record[1]} -> {record[2]}") - -# Example 2: Find recent episodes -recent_episodes = """ -MATCH (ep:Episode) -RETURN ep.name, ep.content, ep.timestamp -ORDER BY ep.timestamp DESC -LIMIT 5 -""" - -result = graph.query(recent_episodes) -for record in result.result_set: - print(f"Episode: {record[0]} - {record[1]}") - -# Example 3: Find entities mentioned in episodes -entity_episodes = """ -MATCH (e:Entity)-[:MENTIONED_IN]->(ep:Episode) -WHERE e.name = 'John' -RETURN ep.name, ep.content -""" - -result = graph.query(entity_episodes) -for record in result.result_set: - print(f"Mentioned in: {record[0]}") -``` - -## Monitoring and Debugging - -### View Server Logs - -To view the logs: - -**For combined image:** - -```bash -docker logs -f graphiti-falkordb -``` - -**For separate containers:** - -```bash -# MCP server logs -docker logs -f graphiti-mcp - -# FalkorDB logs -docker logs -f falkordb -``` - -### Check FalkorDB Connection - -Verify the connection to FalkorDB using the Redis CLI: - -**For combined image:** - -```bash -docker exec -it graphiti-falkordb redis-cli PING -``` - -**For separate containers:** - -```bash -# Test from the FalkorDB container -docker exec -it falkordb redis-cli PING - -# Test from the MCP container to FalkorDB -docker exec -it graphiti-mcp redis-cli -h falkordb -p 6379 PING -``` - -All commands should return `PONG` if the connection is successful. - -### Inspect the Knowledge Graph - -Use the FalkorDB Browser to visualize the knowledge graph: - -1. Open `http://localhost:3000` in your browser -2. Connect to the `graphiti_memory` graph -3. 
Run queries to explore stored knowledge: - -```cypher -MATCH (n) RETURN n LIMIT 25 -``` - -## Troubleshooting - -### Connection Issues - -**Problem**: MCP server cannot connect to FalkorDB - -**Solutions**: - -- Verify FalkorDB is running: `docker ps | grep falkordb` -- Test FalkorDB connection: `docker exec -it falkordb redis-cli PING` (should return `PONG`) -- Check the `FALKORDB_URI` format: `redis://hostname:port` -- For separate containers, use the service name: `redis://falkordb:6379` -- For external FalkorDB, use `redis://host.docker.internal:6379` -- Verify port 6379 is accessible -- Check firewall settings - -### Authentication Errors - -**Problem**: FalkorDB authentication failed - -**Solutions**: - -- Ensure `FALKORDB_USERNAME` and `FALKORDB_PASSWORD` are set correctly -- For FalkorDB Cloud, use your cloud credentials -- For local instances without auth, leave username/password empty - -### OpenAI API Issues - -**Problem**: LLM operations fail - -**Solutions**: - -- Verify your `OPENAI_API_KEY` is valid -- Check you have sufficient API credits -- Ensure you have access to the specified model (default: `gpt-4o-mini`) -- Try setting `MODEL_NAME` to a different model - -### Client Not Connecting - -**Problem**: Claude Desktop or Cursor cannot connect to MCP server - -**Solutions**: - -- Verify the MCP server is running: `docker ps | grep graphiti` -- Check the server logs: `docker logs graphiti-mcp` (or `docker logs graphiti-falkordb` for combined image) -- Test the MCP endpoint: `curl http://localhost:8000/mcp/` -- Check the health endpoint: `curl http://localhost:8000/health` -- Ensure the configuration file path is correct for your OS -- Restart the client application after changing configuration -- Check for port conflicts on port 8000: `lsof -i :8000` (macOS/Linux) or `netstat -ano | findstr :8000` (Windows) -- Verify JSON syntax in the configuration file - -### Memory Not Persisting - -**Problem**: Knowledge is lost between sessions - -**Solutions**: - -- Ensure FalkorDB has persistent storage configured -- Check that the Docker volume is mounted correctly -- Verify the graph name is consistent across sessions -- Use `docker-compose` with volumes for production - -## Best Practices - -1. **Use Environment Variables**: Store sensitive information like API keys in environment variables or `.env` files -2. **Enable Persistence**: Configure FalkorDB with persistent storage for production use -3. **Monitor Resources**: Track memory usage and query performance as the knowledge graph grows -4. **Regular Backups**: Back up your FalkorDB data regularly -5. **Use FalkorDB Cloud**: For production deployments, consider using FalkorDB Cloud for managed hosting -6. **Separate Graphs**: Use different graph names for different projects or users -7. 
**Clean Up**: Periodically review and clean up old or irrelevant data - -## Performance Tips - -- **Indexing**: FalkorDB automatically creates indexes for optimal query performance -- **Batch Operations**: For large data loads, consider batching multiple episodes -- **Graph Size**: Monitor graph size and consider archiving old episodes to separate graphs -- **Model Selection**: - - Use `gpt-4o-mini` for cost-effective operations - - Use `gpt-5` for better accuracy with complex relationships -- **Connection Pooling**: The MCP server handles connection pooling automatically -- **Query Optimization**: Use specific entity names and filters in search queries for faster results - -## Resources - -- 🐳 [Graphiti MCP Server Docker Setup](https://github.com/getzep/graphiti/tree/main/mcp_server/docker) -- πŸ“¦ [Docker Hub Repository](https://hub.docker.com/r/zepai/knowledge-graph-mcp) -- πŸ“š [Model Context Protocol Documentation](https://modelcontextprotocol.io/) -- πŸ“– [Graphiti Documentation](https://help.getzep.com/graphiti/) -- πŸ’» [Graphiti GitHub Repository](https://github.com/getzep/graphiti) -- πŸ”— [FalkorDB Documentation](https://docs.falkordb.com/) -- πŸ“ [MCP Integration Blog Post](https://www.falkordb.com/blog/mcp-integration-falkordb-graphrag/) - -## Next Steps - -- Explore [Graphiti Python Library](./graphiti.md) for direct integration -- Learn about [Cognee](./cognee.md) for flexible memory management -- Check out [GraphRAG SDK](/genai-tools/graphrag-sdk) for advanced reasoning -- Review [Cypher Query Language](/cypher) for custom graph queries - -## Example Use Cases - -### Personal Assistant Memory - -Store personal preferences, tasks, and information: - -```text -You: "Remember that I prefer Python over JavaScript" -AI: "I'll remember that you prefer Python over JavaScript." - -You: "What programming languages do I prefer?" -AI: "You prefer Python over JavaScript." -``` - -### Project Knowledge Base - -Build a knowledge base about your projects: - -```text -You: "Store this: MyApp is a web application built with React and FastAPI" -AI: "I've stored that MyApp is a web application built with React and FastAPI." - -You: "What technologies does MyApp use?" -AI: "MyApp uses React and FastAPI." -``` - -### Meeting Notes and Context - -Remember meeting discussions and action items: - -```text -You: "In today's meeting, we decided to migrate to FalkorDB for the knowledge graph backend" -AI: "I'll remember that decision from today's meeting." - -You: "What did we decide about the knowledge graph backend?" -AI: "You decided to migrate to FalkorDB for the knowledge graph backend." -``` diff --git a/website/docs/agentic-memory/graphiti.md b/website/docs/agentic-memory/graphiti.md deleted file mode 100644 index 5dcfd16..0000000 --- a/website/docs/agentic-memory/graphiti.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -title: Graphiti -description: Build temporally-aware knowledge graphs with Graphiti and FalkorDB -sidebar_position: 1 -sidebar_label: Graphiti ---- - - - -# Graphiti -[Graphiti](https://github.com/getzep/graphiti) is a Python framework for building **temporally-aware, multi-tenant knowledge graphs** designed for multi-agent AI systems with persistent memory. It enables real-time integration of structured and unstructured data, supporting advanced hybrid search, temporal reasoning, and collaborative agent memory. 
- -## Overview - -Graphiti provides a powerful abstraction layer for building knowledge graphs that: -- **Track temporal changes**: Understand how entities and relationships evolve over time -- **Support multi-tenancy**: Isolated memory spaces for different users or agents -- **Enable hybrid search**: Combine semantic search with graph traversal -- **Scale efficiently**: Built on top of FalkorDB's high-performance graph engine - -## Why Graphiti + FalkorDB? - -### FalkorDB's Added Value - -- **Multi-tenant, multi-agent memory**: Isolated graph instances for different users or agents, each with their own persistent memory -- **High performance**: Fast graph operations and efficient memory usage -- **Cloud and on-premises ready**: Works with FalkorDB Cloud or your own deployment -- **Easy integration**: Seamless connection with Graphiti for scalable, production-ready knowledge graphs - -### Use Cases - -- **AI memory for multi-agent systems**: Provide persistent, context-rich memory for each agent -- **Enterprise knowledge management**: Aggregate and search across documents, conversations, and structured data -- **Conversational AI**: Track facts, entities, and relationships over time for more accurate responses -- **E-commerce**: Manage inventory, personalize recommendations, and track customer interactions over time -- **Research and analytics**: Temporal and semantic search across large, heterogeneous datasets - -## Getting Started - -### Prerequisites - -- Python 3.10 or higher -- FalkorDB instance (Cloud or self-hosted) -- OpenAI API key (for embeddings and LLM features) - -### Installation - -Install Graphiti with FalkorDB support: - -```bash -pip install graphiti-core[falkordb] -``` - -### Quick Start Example - -Here's a complete example to get you started with Graphiti and FalkorDB: - -```python -import asyncio -from datetime import datetime -from graphiti_core import Graphiti -from graphiti_core.nodes import EpisodeType - -async def main(): - # Initialize Graphiti with FalkorDB - graphiti = Graphiti( - uri="falkor://localhost:6379", # Your FalkorDB connection string - # For FalkorDB Cloud: - # uri="falkor://your-instance.falkordb.cloud:6379", - # username="default", - # password="your-password" - ) - - # Build indices (run once during setup) - await graphiti.build_indices_and_constraints() - - # Add an episode (information to be stored in the graph) - episode_body = """ - Alice met Bob at the AI conference in San Francisco on March 15, 2024. - They discussed the latest developments in graph databases and decided to - collaborate on a new project using FalkorDB. - """ - - await graphiti.add_episode( - name="Conference Meeting", - episode_body=episode_body, - episode_type=EpisodeType.text, - reference_time=datetime(2024, 3, 15), - source_description="Conference notes" - ) - - # Search the knowledge graph - search_results = await graphiti.search( - query="What did Alice and Bob discuss?", - num_results=5 - ) - - print("Search Results:") - for result in search_results: - print(f"- {result}") - - # Close the connection - await graphiti.close() - -# Run the example -asyncio.run(main()) -``` - -### Understanding the Code - -1. **Initialize Graphiti**: Connect to your FalkorDB instance with the connection URI -2. **Build Indices**: Create necessary graph indices and constraints (one-time setup) -3. **Add Episodes**: Store information as "episodes" - chunks of text or structured data with temporal context -4. **Search**: Query the graph using natural language or specific parameters -5. 
**Close**: Clean up connections when done - -## Advanced Features - -### Hybrid Search - -Graphiti supports multiple search types: - -```python -# Node search - find specific entities -nodes = await graphiti.retrieve_nodes( - query="Alice", - num_results=10 -) - -# Episode search - find specific conversations/events -episodes = await graphiti.retrieve_episodes( - query="conference meeting", - num_results=5 -) - -# Combined hybrid search -results = await graphiti.search( - query="project collaboration", - num_results=10 -) -``` - -### Temporal Queries - -Query the graph at specific points in time: - -```python -from datetime import datetime - -# Get the state of knowledge at a specific time -results = await graphiti.search( - query="What projects was Alice working on?", - reference_time=datetime(2024, 3, 1), - num_results=5 -) -``` - -### Multi-Tenant Architecture - -Create isolated graphs for different users or agents: - -```python -# User 1's graph -graphiti_user1 = Graphiti( - uri="falkor://localhost:6379", - graph_name="user1_memory" -) - -# User 2's graph -graphiti_user2 = Graphiti( - uri="falkor://localhost:6379", - graph_name="user2_memory" -) -``` - -## Configuration Options - -### Connection Parameters - -```python -graphiti = Graphiti( - uri="falkor://localhost:6379", - username="your-username", # For authenticated instances - password="your-password", - graph_name="my_graph", # Custom graph name - llm_provider="openai", # LLM provider for entity extraction - embedding_provider="openai" # Embedding provider for vector search -) -``` - -### Custom LLM Configuration - -```python -from graphiti_core.llm_client import OpenAIClient - -llm_client = OpenAIClient( - api_key="your-openai-key", - model="gpt-4", - temperature=0.7 -) - -graphiti = Graphiti( - uri="falkor://localhost:6379", - llm_client=llm_client -) -``` - -## Best Practices - -1. **Batch Episodes**: When loading large amounts of data, batch your episodes for better performance -2. **Set Reference Times**: Always provide reference times for temporal tracking -3. **Use Descriptive Names**: Give episodes meaningful names for easier retrieval -4. **Index Strategy**: Build indices once during setup, not on every run -5. **Connection Pooling**: Reuse Graphiti instances when possible instead of creating new connections - -## Troubleshooting - -### Connection Issues - -If you can't connect to FalkorDB: -- Verify your connection string format: `falkor://host:port` -- Check if FalkorDB is running: `redis-cli ping` -- Ensure credentials are correct for authenticated instances - -### Performance Optimization - -- Use batching for bulk operations -- Monitor memory usage with `GRAPH.MEMORY USAGE` command -- Consider graph partitioning for very large knowledge bases - -## Resources - -- πŸ“š [Graphiti Documentation](https://help.getzep.com/graphiti/) -- πŸŽ₯ [Workshop: How to Build a Knowledge Graph ft. 
Graphiti](https://www.youtube.com/watch?v=F4hwuLlISP4&lc=UgwPSaR6GAM_86g9AxJ4AaABAg) -- πŸ““ [Blog: Get Started with Graphiti](https://www.falkordb.com/blog/graphiti-get-started/) -- πŸ’» [Graphiti GitHub Repository](https://github.com/getzep/graphiti) -- πŸ“ [Google Colab Example](https://colab.research.google.com/drive/1HbDPKlsz9tYfRGeWHn60vsWeGhFIsqyF?usp=sharing) -- πŸ”— [FalkorDB Configuration Guide](https://help.getzep.com/graphiti/configuration/graph-db-configuration#falkordb) - -## Next Steps - -- Explore [Cognee](./cognee.md) for alternative memory management approaches -- Learn about [GenAI Tools](/genai-tools) for graph reasoning and LLM integrations diff --git a/website/docs/agentic-memory/index.md b/website/docs/agentic-memory/index.md deleted file mode 100644 index 79e090c..0000000 --- a/website/docs/agentic-memory/index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Agentic Memory -description: Learn how to implement agentic memory with FalkorDB for AI agents -sidebar_position: 11 -sidebar_label: Agentic Memory ---- - -# Agentic Memory - -Agentic memory enables AI agents to maintain persistent, contextual memory across interactions. FalkorDB provides an ideal foundation for implementing agentic memory systems through its graph database capabilities, allowing agents to store, retrieve, and reason over complex relationships and temporal information. - -## What is Agentic Memory? - -Agentic memory refers to the ability of AI agents to: -- **Remember past interactions** and learn from them -- **Build contextual understanding** through connected knowledge -- **Reason over temporal information** to understand how relationships evolve -- **Share memory across agents** in multi-agent systems -- **Scale efficiently** as knowledge grows - -## Why FalkorDB for Agentic Memory? - -FalkorDB's graph database architecture makes it uniquely suited for agentic memory: - -- **Graph-Native Storage**: Natural representation of entities, relationships, and contexts -- **Fast Traversals**: Quick retrieval of connected information for context-aware responses -- **Temporal Support**: Track how knowledge and relationships change over time -- **Multi-Tenant Architecture**: Isolated memory spaces for different agents or users -- **Hybrid Search**: Combine vector similarity with graph relationships for precise retrieval -- **High Performance**: Scale from prototype to production seamlessly - -## Agentic Memory Frameworks - -This section covers popular frameworks and tools that implement agentic memory with FalkorDB: - -- [**Graphiti**](./graphiti.md): A temporally-aware knowledge graph framework designed for multi-agent AI systems with persistent memory -- [**Graphiti MCP Server**](./graphiti-mcp-server.md): Run Graphiti as an MCP server for Claude Desktop, Cursor IDE, and other AI clients *(Experimental)* -- [**Cognee**](./cognee.md): A memory management framework for AI agents that combines graph and vector storage - -## Getting Started - -Choose a framework based on your needs: -- If you need **temporal reasoning** and **multi-agent memory**, start with [Graphiti](./graphiti.md) -- If you want to add **persistent memory to Claude Desktop or Cursor IDE**, try the [Graphiti MCP Server](./graphiti-mcp-server.md) -- If you need **flexible memory structures** with **hybrid storage**, explore [Cognee](./cognee.md) - -All frameworks integrate seamlessly with FalkorDB and can be used together in complex systems. 
diff --git a/website/docs/algorithms/betweenness-centrality.md b/website/docs/algorithms/betweenness-centrality.md deleted file mode 100644 index af9464d..0000000 --- a/website/docs/algorithms/betweenness-centrality.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Betweenness Centrality -description: Measures the importance of nodes based on the number of shortest paths that pass through them. -sidebar_label: Betweenness Centrality ---- - - - -# Betweenness Centrality -## Introduction - -Betweenness Centrality is a graph algorithm that quantifies the importance of a node based on the number of shortest paths that pass through it. Nodes that frequently occur on shortest paths between other nodes have higher betweenness centrality scores. This makes the algorithm useful for identifying **key connectors** or **brokers** within a network. - -## Algorithm Overview - -The core idea of Betweenness Centrality is that a node is more important if it lies on many of the shortest paths connecting other nodes. It’s particularly useful in understanding information flow or communication efficiency in a graph. - -> For example, in a social network, a person who frequently connects otherwise unconnected groups would have high betweenness centrality. - -## Syntax - -The procedure has the following call signature: -```cypher -CALL algo.betweenness({ - nodeLabels: [], - relationshipTypes: [] -}) -YIELD node, score -``` - -### Parameters - -| Name | Type | Description | Default | -|-----------------------|---------|-------------------------------------------------|---------| -| `nodeLabels` | Array | *(Optional)* List of Strings representing node labels | [] | -| `relationshipTypes` | Array | *(Optional)* List of Strings representing relationship types | [] | - -### Yield - -| Name | Type | Description | -|---------|-------|-----------------------------------------------| -| `node` | Node | The node being evaluated | -| `score` | Float | The betweenness centrality score for the node | - -## Example: - -Lets take this Social Graph as an example: -![Social Graph](/img/between.png) - -### Create the Graph - -```cypher -CREATE - (a:Person {name: 'Alice'}), - (b:Person {name: 'Bob'}), - (c:Person {name: 'Charlie'}), - (d:Person {name: 'David'}), - (e:Person {name: 'Emma'}), - (a)-[:FRIEND]->(b), - (b)-[:FRIEND]->(c), - (b)-[:FRIEND]->(d), - (c)-[:FRIEND]->(e), - (d)-[:FRIEND]->(e) -``` - -### Run Betweenness Centrality - Sort Persons by importance based on FRIEND relationship - -```cypher -CALL algo.betweenness({ - 'nodeLabels': ['Person'], - 'relationshipTypes': ['FRIEND'] - }) -YIELD node, score -RETURN node.name AS person, score -ORDER BY score DESC -``` - -Expected result: - -| person | score | -|-----------|--------| -| `Bob` | 6 | -| `Charlie` | 2 | -| `David` | 2 | -| `Alice` | 0 | -| `Emma` | 0 | - -## Usage Notes - -- Scores are based on **all shortest paths** between node pairs. -- Nodes that serve as bridges between clusters tend to score higher. -- Can be computationally expensive on large, dense graphs. diff --git a/website/docs/algorithms/bfs.md b/website/docs/algorithms/bfs.md deleted file mode 100644 index 0f43a39..0000000 --- a/website/docs/algorithms/bfs.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: BFS -description: Breadth-First Search (BFS) explores a graph level by level, visiting all neighbors of a node before moving to the next depth. 
-sidebar_label: BFS ---- - - - -# BFS -## Overview - -The Breadth-First Search (BFS) procedure allows you to perform a breadth-first traversal of a graph starting from a specific node. -BFS explores all the nodes at the present depth before moving on to nodes at the next depth level. -This is particularly useful for finding the shortest path between two nodes or exploring a graph layer by layer. - -## Syntax - -``` -CALL algo.bfs(start_node, max_depth, relationship) -YIELD nodes, edges -``` - -## Arguments - -| Name | Type | Description | Default | -|--------------|----------------|-----------------------------------------------------------------------------|------------| -| start_node | Node | Starting node for the BFS traversal | (Required) | -| max_depth | Integer | Maximum depth to traverse | (Required) | -| relationship | String or null | The relationship type to traverse. If null, all relationship types are used | null | - -## Returns - -| Name | Type | Description | -|-------|------|----------------------------------------------| -| nodes | List | List of visited nodes in breadth-first order | -| edges | List | List of edges traversed during the BFS | - -## Examples - -### Social Network Friend Recommendations - -This example demonstrates how to use BFS to find potential friend recommendations in a social network. -By exploring friends of friends, BFS uncovers second-degree connectionsβ€”people you may know through mutual friendsβ€”which are often strong candidates for relevant and meaningful recommendations. - -#### Create the Graph - -```cypher -CREATE - (alice:Person {name: 'Alice', age: 28, city: 'New York'}), - (bob:Person {name: 'Bob', age: 32, city: 'Boston'}), - (charlie:Person {name: 'Charlie', age: 35, city: 'Chicago'}), - (david:Person {name: 'David', age: 29, city: 'Denver'}), - (eve:Person {name: 'Eve', age: 31, city: 'San Francisco'}), - (frank:Person {name: 'Frank', age: 27, city: 'Miami'}), - - (alice)-[:FRIEND]->(bob), - (alice)-[:FRIEND]->(charlie), - (bob)-[:FRIEND]->(david), - (charlie)-[:FRIEND]->(eve), - (david)-[:FRIEND]->(frank), - (eve)-[:FRIEND]->(frank) -``` - -![Graph BFS](/img/graph_bfs.png) - -#### Find Friends of Friends (Potential Recommendations) - -``` -// Find Alice's friends-of-friends (potential recommendations) -MATCH (alice:Person {name: 'Alice'}) -CALL algo.bfs(alice, 2, 'FRIEND') -YIELD nodes - -// Process results to get only depth 2 connections (friends of friends) -WHERE size(nodes) >= 3 -WITH alice, nodes[2] AS potential_friend -WHERE NOT (alice)-[:FRIEND]->(potential_friend) -RETURN potential_friend -``` - -In this social network example, the BFS algorithm helps find potential friend recommendations by identifying people who are connected to Alice's existing friends but not directly connected to Alice yet. 
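The same recommendation query can also be issued from application code. The sketch below assumes the `falkordb` Python client and a local instance where the graph above was created under the illustrative name `social`.

```python
from falkordb import FalkorDB

# Connect to a local FalkorDB instance and select the graph that holds
# the Person/FRIEND data created above ("social" is an illustrative name).
db = FalkorDB(host="localhost", port=6379)
graph = db.select_graph("social")

query = """
MATCH (alice:Person {name: 'Alice'})
CALL algo.bfs(alice, 2, 'FRIEND')
YIELD nodes
WHERE size(nodes) >= 3
WITH alice, nodes[2] AS potential_friend
WHERE NOT (alice)-[:FRIEND]->(potential_friend)
RETURN potential_friend.name AS recommendation
"""

result = graph.query(query)
for row in result.result_set:
    print("Recommended friend:", row[0])
```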
- - -## Performance Considerations - -- **Indexing:** Ensure properties used for finding your starting node are indexed for optimal performance -- **Maximum Depth:** Choose an appropriate max_depth value based on your graph's connectivity; large depths in highly connected graphs can result in exponential growth of traversed nodes -- **Relationship Filtering:** When applicable, specify the relationship type to limit the traversal scope -- **Memory Management:** Be aware that the procedure stores visited nodes in memory to avoid cycles, which may require significant resources in large, densely connected graphs - -## Error Handling - -Common errors that may occur: - -- **Null Starting Node:** If the start_node parameter is null, the procedure will raise an error; ensure your MATCH clause successfully finds the starting node -- **Invalid Relationship Type:** If you specify a relationship type that doesn't exist in your graph, the traversal will only include the starting node -- **Memory Limitations:** For large graphs with high connectivity, an out-of-memory error may occur if too many nodes are visited -- **Result Size:** If the BFS traversal returns too many nodes, query execution may be slow or time out; in such cases, try reducing the max_depth or filtering by relationship types diff --git a/website/docs/algorithms/cdlp.md b/website/docs/algorithms/cdlp.md deleted file mode 100644 index 237bcfd..0000000 --- a/website/docs/algorithms/cdlp.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Community Detection using Label Propagation (CDLP) -description: Community Detection using Label Propagation (CDLP) -sidebar_label: Community Detection using Label Propagation (CDLP) ---- - - - -# Community Detection using Label Propagation (CDLP) -## Overview - -The Community Detection using Label Propagation (CDLP) algorithm identifies communities in networks by propagating labels through the graph structure. -Each node starts with a unique label, and through iterative propagation, nodes adopt the most frequent label among their neighbors, naturally forming communities where densely connected nodes share the same label. - -CDLP serves as a powerful algorithm in scenarios such as: -- Social network community detection -- Biological network module identification -- Web page clustering and topic detection -- Market segmentation analysis -- Fraud detection networks - -## Algorithm Details - -CDLP initializes by assigning each node a unique label (typically its node ID). -The algorithm then iteratively updates each node's label to the most frequent label among its neighbors. -During each iteration, nodes are processed in random order to avoid deterministic bias. -The algorithm continues until labels stabilize (no changes occur) or a maximum number of iterations is reached. -The final labels represent community assignments, where nodes sharing the same label belong to the same community. - -The algorithm's strength lies in its ability to discover communities without requiring prior knowledge of the number of communities or their sizes. -It runs in near-linear time and mimics epidemic contagion by spreading labels through the network. - -### Performance - -CDLP operates with a time complexity of **O(m + n)** per iteration, where: -- **n** represents the total number of nodes -- **m** represents the total number of edges - -The algorithm typically converges within a few iterations, making it highly efficient for large-scale networks. 
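To make the propagation rule concrete, here is a small framework-free Python sketch of the core idea on a toy graph: every node starts with its own label and repeatedly adopts the most frequent label among its neighbors until nothing changes. This only illustrates the mechanism; the built-in procedure described below is what you would use in practice.

```python
import random
from collections import Counter

# Toy undirected graph as an adjacency list: two triangles and one isolated node.
adjacency = {
    "A": ["B", "C"], "B": ["A", "C"], "C": ["A", "B"],
    "D": ["E", "F"], "E": ["D", "F"], "F": ["D", "E"],
    "G": [],  # isolated node keeps its own label
}

# Initialization: every node gets a unique label.
labels = {node: i for i, node in enumerate(adjacency)}

for _ in range(10):  # equivalent to maxIterations
    changed = False
    nodes = list(adjacency)
    random.shuffle(nodes)  # random order avoids deterministic bias
    for node in nodes:
        neighbors = adjacency[node]
        if not neighbors:
            continue
        # Adopt the most frequent label among the node's neighbors.
        top_label = Counter(labels[n] for n in neighbors).most_common(1)[0][0]
        if labels[node] != top_label:
            labels[node] = top_label
            changed = True
    if not changed:  # stop once labels have stabilized
        break

print(labels)  # nodes sharing a label belong to the same community
```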
- -## Syntax - -```cypher -CALL algo.labelPropagation([config]) -``` - -### Parameters - -The procedure accepts an optional configuration `Map` with the following parameters: - -| Name | Type | Default | Description | -|---------------------|---------|------------------------|----------------------------------------------------------------------------------| -| `nodeLabels` | Array | All labels | Array of node labels to filter which nodes are included in the computation | -| `relationshipTypes` | Array | All relationship types | Array of relationship types to define which edges are traversed | -| `maxIterations` | Integer | 10 | Maximum number of iterations to run the algorithm | - -### Return Values -The procedure returns a stream of records with the following fields: - -| Name | Type | Description | -|---------------|---------|---------------------------------------------------------------------| -| `node` | Node | The node entity included in the community | -| `communityId` | Integer | Identifier of the community the node belongs to | - -## Examples - -Let's take this Social Network as an example: - -``` - (Alice)---(Bob)---(Charlie) (Kate) - | | | - (Diana) | (Eve)---(Frank) - | | | | - (Grace)--(Henry) (Iris)--(Jack) -``` - -There are 3 different communities that should emerge from this network: -- Alice, Bob, Charlie, Diana, Grace, Henry -- Eve, Frank, Iris, Jack -- Any isolated nodes - -### Create the Graph - -```cypher -CREATE - (alice:Person {name: 'Alice'}), - (bob:Person {name: 'Bob'}), - (charlie:Person {name: 'Charlie'}), - (diana:Person {name: 'Diana'}), - (eve:Person {name: 'Eve'}), - (frank:Person {name: 'Frank'}), - (grace:Person {name: 'Grace'}), - (henry:Person {name: 'Henry'}), - (iris:Person {name: 'Iris'}), - (jack:Person {name: 'Jack'}), - (kate:Person {name: 'Kate'}), - - (alice)-[:KNOWS]->(bob), - (bob)-[:KNOWS]->(charlie), - (alice)-[:KNOWS]->(diana), - (bob)-[:KNOWS]->(henry), - (diana)-[:KNOWS]->(grace), - (grace)-[:KNOWS]->(henry), - (charlie)-[:KNOWS]->(eve), - (eve)-[:KNOWS]->(frank), - (eve)-[:KNOWS]->(iris), - (frank)-[:KNOWS]->(jack), - (iris)-[:KNOWS]->(jack) -``` - -### Example: Detect all communities in the network - -```cypher -CALL algo.labelPropagation() YIELD node, communityId RETURN node.name AS name, communityId ORDER BY communityId, name -``` - -#### Expected Results -| name | communityId | -|------------|-------------| -| `Alice` | 0 | -| `Bob` | 0 | -| `Charlie` | 0 | -| `Diana` | 0 | -| `Grace` | 0 | -| `Henry` | 0 | -| `Eve` | 2 | -| `Frank` | 2 | -| `Iris` | 2 | -| `Jack` | 2 | -| `Kate` | 10 | - -### Example: Detect communities with limited iterations - -```cypher -CALL algo.labelPropagation({maxIterations: 5}) YIELD node, communityId -``` - -### Example: Focus on specific node types - -```cypher -CALL algo.labelPropagation({nodeLabels: ['Person']}) YIELD node, communityId -``` - -### Example: Use only certain relationship types - -```cypher -CALL algo.labelPropagation({relationshipTypes: ['KNOWS', 'FRIENDS_WITH']}) YIELD node, communityId -``` - -### Example: Combine node and relationship filtering - -```cypher -CALL algo.labelPropagation({ - nodeLabels: ['Person'], - relationshipTypes: ['KNOWS'] -}) YIELD node, communityId -``` - -### Example: Group communities together - -```cypher -CALL algo.labelPropagation() YIELD node, communityId -RETURN collect(node.name) AS community_members, communityId, count(*) AS community_size -ORDER BY community_size DESC -``` - -#### Expected Results -| community_members | communityId | community_size | 
-|----------------------------------------------------------|-------------|----------------| -| `["Alice", "Bob", "Charlie", "Diana", "Grace", "Henry"]` | 0 | 6 | -| `["Eve", "Frank", "Iris", "Jack"]` | 2 | 4 | -| `["Kate"]` | 10 | 1 | - -### Example: Find the largest communities - -```cypher -CALL algo.labelPropagation() YIELD node, communityId -RETURN communityId, collect(node) AS nodes, count(*) AS size -ORDER BY size DESC -LIMIT 1 -``` - diff --git a/website/docs/algorithms/index.md b/website/docs/algorithms/index.md deleted file mode 100644 index 4aec522..0000000 --- a/website/docs/algorithms/index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Algorithms -description: Graph Algorithms Overview -sidebar_position: 3 -sidebar_label: Algorithms ---- - -# Algorithms - -FalkorDB offers a suite of graph algorithms optimized for high-performance graph analytics. -These algorithms are accessible via the `CALL algo.()` interface and are built for speed and scalability using matrix-based computation. - -This overview summarizes the available algorithms and links to their individual documentation. - -## Table of Contents - -- [Pathfinding Algorithms](#pathfinding-algorithms) -- [Centrality Measures](#centrality-measures) -- [Community Detection](#community-detection) - ---- - -## Pathfinding Algorithms - -- **[BFS](./bfs.md)** - Performs a breadth-first search starting from a source node and optionally stopping at target nodes or maximum depth. - -- **[SPpath](./sppath.md)** - Computes the shortest paths between a source and one or more destination nodes. - -- **[SSpath](./sspath.md)** - Enumerates all paths from a single source node to other nodes, based on constraints like edge filters and depth. - -- **[MSF](./msf.md)** - Computes the Minimum Spanning Forest of a graph, finding the minimum spanning tree for each connected component. - -For path expressions like `shortestPath()` used directly in Cypher queries, refer to the [Cypher Path Functions section](../cypher/functions.md#path-functions). - -## Centrality Measures - -- **[PageRank](./pagerank.md)** - Computes the PageRank score of each node in the graph, representing its influence based on the structure of incoming links. - -- **[Betweenness Centrality](./betweenness-centrality.md)** - Calculates the number of shortest paths that pass through each node, indicating its importance as a connector in the graph. - -## Community Detection - -- **[WCC (Weakly Connected Components)](./wcc.md)** - Finds weakly connected components in a graph, where each node is reachable from others ignoring edge directions. - -- **[CDLP (Community Detection Label Propagation)](./cdlp.md)** - Detects communities in a network, by propagating labels through the graph structure. - diff --git a/website/docs/algorithms/msf.md b/website/docs/algorithms/msf.md deleted file mode 100644 index a7e29f6..0000000 --- a/website/docs/algorithms/msf.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: MSF -description: Minimum Spanning Forest Algorithm -sidebar_position: 9 -sidebar_label: MSF ---- - - - -# MSF -The Minimum Spanning Forest algorithm computes the minimum spanning forest of a graph. A minimum spanning forest is a collection of minimum spanning trees, one for each connected component in the graph. - -## What is a Minimum Spanning Forest? 
- -- For a **connected graph**, the MSF is a single minimum spanning tree (MST) that connects all nodes with the minimum total edge weight -- For a **disconnected graph**, the MSF consists of multiple MSTs, one for each connected component -- The forest contains no cycles and has exactly `N - C` edges, where `N` is the number of nodes and `C` is the number of connected components - -## Use Cases - -- **Network Design**: Minimize cable/pipeline costs when connecting multiple locations -- **Clustering**: Identify natural groupings in data by analyzing the forest structure -- **Image Segmentation**: Group similar pixels using edge weights as similarity measures -- **Road Networks**: Optimize road construction to connect all cities with minimum cost - -## Syntax - -```cypher -CALL algo.MSF( - config: MAP -) YIELD src, dest, weight, relationshipType -``` - -### Parameters - -| Parameter | Type | Description | -|-----------|------|-------------| -| `config` | MAP | Configuration map containing algorithm parameters | - -#### Configuration Options - -| Option | Type | Required | Default | Description | -|--------|------|----------|---------|-------------| -| `sourceNodes` | List of Nodes | No | All nodes | Starting nodes for the algorithm. If not provided, all nodes in the graph are considered | -| `relationshipTypes` | List of Strings | No | All types | Relationship types to traverse. If not provided, all relationship types are considered | -| `relationshipWeightProperty` | String | No | `null` | Property name containing edge weights. If not specified, all edges have weight 1.0 | -| `defaultValue` | Float | No | `1.0` | Default weight for edges that don't have the weight property | - -### Returns - -| Field | Type | Description | -|-------|------|-------------| -| `src` | Node | Source node of the edge in the spanning forest | -| `dest` | Node | Destination node of the edge in the spanning forest | -| `weight` | Float | Weight of the edge | -| `relationshipType` | String | Type of the relationship | - -## Examples - -### Example 1: Basic MSF with Unweighted Graph - -Find the minimum spanning forest treating all edges equally: - -```cypher -CALL algo.MSF({}) YIELD src, dest, weight, relationshipType -RETURN src.name AS source, dest.name AS destination, weight, relationshipType -``` - -### Example 2: MSF with Weighted Edges - -Consider a graph representing cities connected by roads with distances: - -```cypher -// Create a weighted graph -CREATE (a:City {name: 'A'}), (b:City {name: 'B'}), (c:City {name: 'C'}), - (d:City {name: 'D'}), (e:City {name: 'E'}) -CREATE (a)-[:ROAD {distance: 2}]->(b), - (a)-[:ROAD {distance: 3}]->(c), - (b)-[:ROAD {distance: 1}]->(c), - (b)-[:ROAD {distance: 4}]->(d), - (c)-[:ROAD {distance: 5}]->(d), - (d)-[:ROAD {distance: 6}]->(e) - -// Find minimum spanning forest using distance weights -CALL algo.MSF({ - relationshipWeightProperty: 'distance' -}) YIELD src, dest, weight -RETURN src.name AS from, dest.name AS to, weight AS distance -ORDER BY weight -``` - -**Result:** -```text -from | to | distance ------|----|--------- -B | C | 1.0 -A | B | 2.0 -A | C | 3.0 -B | D | 4.0 -D | E | 6.0 -``` - -### Example 3: MSF on Specific Relationship Types - -Find the spanning forest considering only specific relationship types: - -```cypher -CALL algo.MSF({ - relationshipTypes: ['ROAD', 'HIGHWAY'], - relationshipWeightProperty: 'distance' -}) YIELD src, dest, weight, relationshipType -RETURN src.name AS from, dest.name AS to, weight, relationshipType -``` - -### Example 4: MSF 
Starting from Specific Nodes - -Compute the spanning forest starting from a subset of nodes: - -```cypher -MATCH (start:City) WHERE start.name IN ['A', 'B'] -WITH collect(start) AS startNodes -CALL algo.MSF({ - sourceNodes: startNodes, - relationshipWeightProperty: 'distance' -}) YIELD src, dest, weight -RETURN src.name AS from, dest.name AS to, weight -``` - -### Example 5: Disconnected Graph - -For a graph with multiple components, MSF returns multiple trees: - -```cypher -// Create two disconnected components -CREATE (a:Node {name: 'A'})-[:CONNECTED {weight: 1}]->(b:Node {name: 'B'}), - (b)-[:CONNECTED {weight: 2}]->(c:Node {name: 'C'}), - (x:Node {name: 'X'})-[:CONNECTED {weight: 3}]->(y:Node {name: 'Y'}) - -// Find MSF -CALL algo.MSF({ - relationshipWeightProperty: 'weight' -}) YIELD src, dest, weight -RETURN src.name AS from, dest.name AS to, weight -``` - -**Result:** Two separate trees (A-B-C and X-Y) - -## Algorithm Details - -FalkorDB's MSF implementation uses an efficient matrix-based approach optimized for graph databases: - -1. **Connected Components**: First identifies all connected components in the graph -2. **MST per Component**: Computes a minimum spanning tree for each component using a variant of Kruskal's or Prim's algorithm -3. **Edge Selection**: Selects edges in order of increasing weight, avoiding cycles - -### Performance Characteristics - -- **Time Complexity**: O(E log V) where E is the number of edges and V is the number of vertices -- **Space Complexity**: O(V + E) -- **Optimized**: Uses sparse matrix representation for efficient computation - -## Best Practices - -1. **Weight Properties**: Ensure weight properties are numeric (integers or floats) -2. **Missing Weights**: Use `defaultValue` to handle edges without weight properties -3. **Large Graphs**: For large graphs (100K+ nodes), consider filtering by `sourceNodes` or `relationshipTypes` -4. **Directed vs Undirected**: The algorithm treats relationships as undirected for spanning forest purposes - -## Related Algorithms - -- **[WCC (Weakly Connected Components)](./wcc.md)**: Identify connected components before running MSF -- **[BFS](./bfs.md)**: Traverse the resulting spanning forest -- **[SPpath](./sppath.md)**: Find shortest paths using the spanning forest structure - -## See Also - -- [Cypher Procedures](../cypher/procedures.md) -- [Graph Algorithms Overview](./index.md) diff --git a/website/docs/algorithms/pagerank.md b/website/docs/algorithms/pagerank.md deleted file mode 100644 index e89afe3..0000000 --- a/website/docs/algorithms/pagerank.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: PageRank -description: Rank nodes based on the number and quality of edges pointing to them, simulating the likelihood of a random traversal landing on each node. -sidebar_label: PageRank ---- - - - -# PageRank -## Introduction - -PageRank is an algorithm that measures the importance of each node within the graph based on the number of incoming relationships and the importance of the corresponding source nodes. -The algorithm was originally developed by Google's founders Larry Page and Sergey Brin during their time at Stanford University. - -## Algorithm Overview - -PageRank works by counting the number and quality of relationships to a node to determine a rough estimate of how important that node is. -The underlying assumption is that more important nodes are likely to receive more connections from other nodes. - -The algorithm assigns each node a score, where higher scores indicate greater importance. 
-The score for a node is derived recursively from the scores of the nodes that link to it, with a damping factor typically applied to prevent rank sinks. -For example, in a network of academic papers, a paper cited by many other highly cited papers will receive a high PageRank score, reflecting its influence in the field. - -## Syntax - -The PageRank procedure has the following call signature: - -```cypher -CALL pagerank.stream( - [label], - [relationship] -) -YIELD node, score -``` - -### Parameters - -| Name | Type | Default | Description | -|----------------|--------|---------|------------------------------------------------------------------------------| -| `label` | String | null | The label of nodes to run the algorithm on. If null, all nodes are used. | -| `relationship` | String | null | The relationship type to traverse. If null, all relationship types are used. | - -### Yield - -| Name | Type | Description | -|---------|-------|--------------------------------------| -| `node` | Node | The node processed by the algorithm. | -| `score` | Float | The PageRank score for the node. | - -## Examples - -### Unweighted PageRank - -First, let's create a sample graph representing a citation network between scientific papers: - -```cypher -CREATE - (paper1:Paper {title: 'Graph Algorithms in Database Systems'}), - (paper2:Paper {title: 'PageRank Applications'}), - (paper3:Paper {title: 'Data Mining Techniques'}), - (paper4:Paper {title: 'Network Analysis Methods'}), - (paper5:Paper {title: 'Social Network Graph Theory'}), - - (paper2)-[:CITES]->(paper1), - (paper3)-[:CITES]->(paper1), - (paper3)-[:CITES]->(paper2), - (paper4)-[:CITES]->(paper1), - (paper4)-[:CITES]->(paper3), - (paper5)-[:CITES]->(paper2), - (paper5)-[:CITES]->(paper4) -``` - -![Graph PR](/img/graph_page_rank.png) - -Now we can run the PageRank algorithm on this citation network: - -```cypher -CALL pagerank.stream('Paper', 'CITES') -YIELD node, score -RETURN node.title AS paper, score -ORDER BY score DESC -``` - -Expected results: - -| paper | score | -|--------------------------------------|-------| -| Graph Algorithms in Database Systems | 0.43 | -| Data Mining Techniques | 0.21 | -| PageRank Applications | 0.19 | -| Network Analysis Methods | 0.14 | -| Social Network Graph Theory | 0.03 | - - -## Usage Notes - -**Interpreting scores**: - - PageRank scores are relative, not absolute measures - - The sum of all scores in a graph equals 1.0 - - Scores typically follow a power-law distribution diff --git a/website/docs/algorithms/sppath.md b/website/docs/algorithms/sppath.md deleted file mode 100644 index e4e36c9..0000000 --- a/website/docs/algorithms/sppath.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: algo.SPpaths -description: Find shortest paths between two nodes with advanced cost and length constraints. -sidebar_label: algo.SPpaths ---- - - - -# algo.SPpaths -The `algo.SPpaths` procedure finds the shortest paths between a **source** and a **target** node, optionally constrained by cost, path length, and the number of paths to return. - -It is designed for efficient and scalable computation of paths in large graphs, using properties like distance, time, or price as weights. -For example, it can be used to find the fastest driving route between two cities, the cheapest shipping option in a logistics network, or the shortest communication path in a computer network. 
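-
-As a quick illustration before the formal syntax (a minimal sketch, not one of the worked examples below): assuming a hypothetical graph of `:City` nodes connected by `:Road` relationships that carry a `time` property, a "fastest route" lookup between two cities could look like this. The labels, relationship type, property name, and city names are placeholders.
-
-```cypher
-// Find the fastest route between two hypothetical cities,
-// minimizing the total 'time' accumulated across :Road relationships.
-MATCH (src:City {name: 'Paris'}), (dst:City {name: 'Lyon'})
-CALL algo.SPpaths({
-  sourceNode: src,
-  targetNode: dst,
-  relTypes: ['Road'],
-  weightProp: 'time'
-})
-YIELD path, pathWeight
-RETURN pathWeight, [n IN nodes(path) | n.name] AS route
-```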
- -## Syntax - -```cypher -CALL algo.SPpaths({ - sourceNode: , - targetNode: , - relTypes: [], - weightProp: , - costProp: , // optional - maxCost: , // optional - maxLen: , // optional - relDirection: "outgoing", // or "incoming", "both" - pathCount: // 0 = all, 1 = single (default), n > 1 = up to n -}) -YIELD path, pathWeight, pathCost -``` - -## Parameters - -| Name | Type | Description | -|-----------------|----------|--------------------------------------------------------------------------------------| -| `sourceNode` | Node | Starting node | -| `targetNode` | Node | Destination node | -| `relTypes` | Array | List of relationship types to follow | -| `weightProp` | String | Property to minimize along the path (e.g., `dist`, `time`) | -| `costProp` | String | Property to constrain the total value (optional) | -| `maxCost` | Integer | Upper bound on total cost (optional) | -| `maxLen` | Integer | Max number of relationships in the path (optional) | -| `relDirection` | String | Traversal direction (`outgoing`, `incoming`, `both`) | -| `pathCount` | Integer | Number of paths to return (0 = all shortest, 1 = default, n = max number of results) | - -## Returns - -| Name | Type | Description | -|--------------|---------|------------------------------------------------| -| `path` | Path | Discovered path from source to target | -| `pathWeight` | Integer | Sum of the weightProp across the path | -| `pathCost` | Integer | Sum of the costProp across the path (if used) | - - -## Examples: -Lets take this Road Network Graph as an example: - -![Road network](/img/road_network.png) - -### Example: Shortest Path by Distance from City A to City G: - -```cypher -MATCH (a:City{name:'A'}), (g:City{name:'G'}) -CALL algo.SPpaths({ - sourceNode: a, - targetNode: g, - relTypes: ['Road'], - weightProp: 'dist' -}) -YIELD path, pathWeight -RETURN pathWeight, [n in nodes(path) | n.name] AS pathNodes -``` - -#### Expected Result: -| pathWeight | pathNodes | -|------------|---------------| -| `12` | [A, D, E G] | - - -### Example: Bounded Cost Path from City A to City G: - -```cypher -MATCH (a:City{name:'A'}), (g:City{name:'G'}) -CALL algo.SPpaths({ - sourceNode: a, - targetNode: g, - relTypes: ['Road'], - weightProp: 'dist', - costProp: 'time', - maxCost: 12, - pathCount: 2 -}) -YIELD path, pathWeight, pathCost -RETURN pathWeight, pathCost, [n in nodes(path) | n.name] AS pathNodes -``` - -#### Expected Result: -| pathWeight | pathCost | pathNodes | -|------------|----------| --------------- | -| `16` | `10` | [A, D, F G] | -| `14` | `12` | [A, D, C F, G] | - ---- diff --git a/website/docs/algorithms/sspath.md b/website/docs/algorithms/sspath.md deleted file mode 100644 index f4934b0..0000000 --- a/website/docs/algorithms/sspath.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: algo.SSpaths -description: Explore all shortest paths from a single source node with weight, cost, and length constraints. -sidebar_label: algo.SSpaths ---- - - - -# algo.SSpaths -The `algo.SSpaths` procedure returns all shortest paths from a **source node** to multiple reachable nodes, subject to constraints like cost, path length, and number of paths to return. 
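-
-As a quick illustration (a minimal sketch assuming a hypothetical graph of `:City` nodes and `:Road` relationships with a `dist` property; all names are placeholders): list every destination reachable from a single city within a given distance budget, cheapest paths first.
-
-```cypher
-// Enumerate all destinations reachable from 'Paris' within a 15 km budget,
-// ordered by the accumulated 'dist' cost of each path.
-MATCH (src:City {name: 'Paris'})
-CALL algo.SSpaths({
-  sourceNode: src,
-  relTypes: ['Road'],
-  costProp: 'dist',
-  maxCost: 15,
-  pathCount: 100
-})
-YIELD path, pathCost
-RETURN pathCost, [n IN nodes(path) | n.name] AS route
-ORDER BY pathCost
-```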
- -## Syntax - -```cypher -CALL algo.SSpaths({ - sourceNode: , - relTypes: [], - weightProp: , // optional - costProp: , // optional - maxCost: , // optional - maxLen: , // optional - relDirection: "outgoing", // or "incoming", "both" - pathCount: -}) -YIELD path, pathWeight, pathCost -``` - -## Parameters - - -| Name | Type | Description | -|-----------------|----------|--------------------------------------------------------------------------------------| -| `sourceNode` | Node | Starting node | -| `relTypes` | Array | List of relationship types to follow | -| `weightProp` | String | Property to minimize along the path (e.g., `dist`, `time`) | -| `costProp` | String | Property to constrain the total value (optional) | -| `maxCost` | Integer | Upper bound on total cost (optional) | -| `maxLen` | Integer | Max number of relationships in the path (optional) | -| `relDirection` | String | Traversal direction (`outgoing`, `incoming`, `both`) | -| `pathCount` | Integer | Number of paths to return (0 = all shortest, 1 = default, n = max number of results) | - -## Returns - -| Name | Type | Description | -|--------------|---------|------------------------------------------------| -| `path` | Path | Discovered path from source to target | -| `pathWeight` | Integer | Sum of the weightProp across the path | -| `pathCost` | Integer | Sum of the costProp across the path (if used) | - - -## Examples: -Lets take this Road Network Graph as an example: - -![Road network](/img/road_network.png) - - -### Example: All Shortest Paths by Distance (up to 10 km) - -```cypher -MATCH (a:City{name:'A'}) -CALL algo.SSpaths({ - sourceNode: a, - relTypes: ['Road'], - costProp: 'dist', - maxCost: 10, - pathCount: 1000 -}) -YIELD path, pathCost -RETURN pathCost, [n in nodes(path) | n.name] AS pathNodes -ORDER BY pathCost -``` - -#### Expected Result: -| pathCost | pathNodes | -|----------| ---------- | -| `2` | [A, D] | -| `3` | [A, B] | -| `6` | [A, D, C] | -| `7` | [A, D, E] | -| `8` | [A, B, D] | -| `8` | [A, C] | -| `10` | [A, B, E] | - ---- - -### Example: Top 5 Shortest Paths from A by Distance - -```cypher -MATCH (a:City{name:'A'}) -CALL algo.SSpaths({ - sourceNode: a, - relTypes: ['Road'], - weightProp: 'dist', - pathCount: 5 -}) -YIELD path, pathWeight, pathCost -RETURN pathWeight, pathCost, [n in nodes(path) | n.name] AS pathNodes -ORDER BY pathWeight -``` - -#### Expected Result: -| pathWeight | pathCost | pathNodes | -| -----------|----------| ---------- | -| `2` | `1` | [A, D] | -| `3` | `1` | [A, B] | -| `6` | `2` | [A, D, C] | -| `7` | `2` | [A, D, E] | -| `8` | `1` | [A, C] | - diff --git a/website/docs/algorithms/wcc.md b/website/docs/algorithms/wcc.md deleted file mode 100644 index e9db247..0000000 --- a/website/docs/algorithms/wcc.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Weakly Connected Components (WCC) -description: Weakly Connected Components (WCC) -sidebar_label: Weakly Connected Components (WCC) ---- - - - -# Weakly Connected Components (WCC) -## Overview - -The Weakly Connected Components (WCC) algorithm identifies groups of nodes connected through any path, disregarding edge directions. In a weakly connected component, every node is reachable from any other node when treating all edges as undirected. - -WCC serves as a common algorithm in scenarios such as: -- Community detection -- Data cleaning and preprocessing -- Large-scale network analysis -- Detecting isolated or loosely connected subgraphs - -## Algorithm Details - -WCC initializes by assigning each node to its own component. 
It iteratively scans for edges linking nodes across different components and merges them, ignoring the directionality of edges throughout the process. The algorithm terminates when no further merges occur, producing a collection of disjoint connected components. - -### Performance - -WCC operates with a time complexity of **O(\|V\| + \|E\|)**, where: -- **\|V\|** represents the total number of nodes -- **\|E\|** represents the total number of edges -This linear complexity makes WCC efficient for large graphs. - -## Syntax - -```cypher -CALL algo.wcc([config]) -``` - -### Parameters - -The procedure accepts an optional configuration `Map` with the following parameters: - -| Name | Type | Default | Description | -|---------------------|-------|------------------------|----------------------------------------------------------------------------------| -| `nodeLabels` | Array | All labels | Array of node labels to filter which nodes are included in the computation | -| `relationshipTypes` | Array | All relationship types | Array of relationship types to define which edges are traversed | - -### Return Values - -The procedure returns a stream of records with the following fields: - -| Name | Type | Description | -|---------------|---------|---------------------------------------------------------------------| -| `node` | Node | The node entity included in the component | -| `componentId` | Integer | Identifier of the weakly connected component the node belongs to | - -## Examples: - -Lets take this Social Graph as an example: - -![Graph WCC](/img/wcc.png) - -There are 3 different communities in this graph: -- Alice, Bob, Charlie -- David, Emma -- Frank - -### Create the Graph - -```cypher -CREATE - (alice:User {name: 'Alice'}), - (bob:User {name: 'Bob'}), - (charlie:User {name: 'Charlie'}), - - (david:User {name: 'David'}), - (emma:User {name: 'Emma'}), - - (frank:User {name: 'Frank'}), - - (alice)-[:FOLLOWS]->(bob), - (bob)-[:FRIENDS_WITH]->(charlie), - (charlie)-[:FOLLOWS]->(alice), - - (david)-[:FRIENDS_WITH]->(emma) -``` - -### Example: Find isolated communities in a social network - -```cypher -CALL algo.WCC(null) yield node, componentId -``` - -#### Expected Results - -| node | componentId | -|--------------------------------|-------------| -| `(:User {name: "Alice"})` | 0 | -| `(:User {name: "Bob"})` | 0 | -| `(:User {name: "Charlie"})` | 0 | -| `(:User {name: "David"})` | 3 | -| `(:User {name: "Emma"})` | 3 | -| `(:User {name: "Frank"})` | 5 | - -### Example: Group Communities together into a single list - -```cypher -CALL algo.WCC(null) yield node, componentId return collect(node.name), componentId -``` - -#### Expected Results - -| collect(node.name) | componentId | -|----------------------------|-------------| -| `[David, Emma]` | 3 | -| `[Frank]` | 5 | -| `[Alice, Bob, Charlie]` | 0 | -``` diff --git a/website/docs/browser/readme-browser.md b/website/docs/browser/readme-browser.md deleted file mode 100644 index 5bfbc36..0000000 --- a/website/docs/browser/readme-browser.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Browser -description: FalkorDB Browser web UI documentation -sidebar_position: 9 -sidebar_label: Browser ---- - - -# Browser -FalkorDB's Browser provides a web UI for exploring, querying, and managing FalkorDB graphs. It allows developers to interact with graphs loaded to FalkorDB, explore how specific queries behave, and review the current data model. FalkorDB Browser integrates within the main FalkorDB Docker container and through the Cloud service. 
- -![FalkorDB Browser GIF_01-26(1)](https://github.com/user-attachments/assets/af4f4d1c-111a-46a4-8442-8c08c037014f) - ---- - -## Main Features - -### Graph exploration (Graph page) - -| Feature | Description | -| --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Interactive graph canvas | Visualizes query results containing nodes and edges as an interactive graph. Supports pan, zoom, and interaction with nodes and relationships. Toggles visibility by labels and relationship types. | -| Element search (in-canvas search) | Search nodes and edges by node properties (string prefix match), IDs, relationship type, and labels. Jump to, zoom to, and select the match. | -| Data and inspection panel | Selecting an element opens a side panel for inspecting its properties. This panel supports editing workflows (see "Data manipulation"). | -| Entity Creation Tools | Add a node, an edge, or both to the current graph from the canvas view. | - -### Querying - -| Feature | Description | -| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Cypher query editor (Monaco) | The editor-style experience for writing Cypher includes keyboard shortcuts: Run (Enter and Cmd/Ctrl + Enter in the query-history editor) and Insert newline (Shift + Enter). The editor includes Cypher keyword and function completion based on the Browser's built-in lists. | -| Results views | Graph view for node and edge results. Table view for tabular results. | -| Query metadata | The Metadata tab shows query metadata text, explain plan (rendered as a nested tree), and profile output (rendered as a nested tree). | - -### Query history - -| Feature | Description | -| ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| Persistent query history | Stores in browser localStorage. | -| History browser dialog | Allows you to search and filter previous queries, filter by graph name, and delete single queries, multi-select delete, or delete all queries. | -| Per-query metadata | Review metadata, explain, and profile for past queries. | - -![query-history-eye-candy](https://github.com/user-attachments/assets/be000961-f456-4b04-adf0-96f754b7447a) - -### Data manipulation (nodes/relationships) - -| Feature | Description | -| ---------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| Create and delete operations | Create node and create relationship flows from the Graph UI. Delete elements (node or relationship) from the Graph UI. | -| Edit labels | Edit labels through API routes (the UI provides label management components). | - -### Graph management - -| Feature | Description | -| ---------------- | -------------------------------------------------------------------------------------------------------- | -| Create graphs | Create graphs from the UI. | -| Delete graphs | Delete graphs (supports deleting multiple selected graphs). 
| -| Duplicate graphs | Create a copy of an existing graph (including data). | -| Export graphs | Download a .dump file via the Browser (/api/graph/:graph/export). | -| Upload data | Upload data through the "Upload Data" dialog, which supports drag-and-drop file selection (Dropzone UI). | - -### Graph Info panel - -| Feature | Description | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Memory Usage tracking | Exposes current memory utilization of the graph in MB. | -| Node Label tracking | Displays all node labels in the graph and controls style visualization for labels. Click on a node label to trigger a query that visualizes nodes from this label. | -| Edge Type tracking | Displays all edge types in the graph. Click on an edge type to trigger a graph query showing only nodes connected through this edge type. | -| Property Keys tracking | Displays all property keys in the graph. Click on a key to issue a query that shows nodes and edges where the property exists (not NULL). | - -falkordb-browser-eye-candy - ---- - -### API Documentation - -| Feature | Description | -| ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Built-in Swagger UI | Available at /docs. Loads the Browser's OpenAPI spec from /api/swagger. "Try it out" enabled. Adds an X-JWT-Only: true header when calling endpoints from Swagger UI. | - -browser-api-doc-eye-candy - -### Authentication & access control - -| Feature | Description | -| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| Authentication | Uses NextAuth (credentials-backed) for authentication. | -| Role-aware UI capabilities | Read-Only users cannot create graphs. Admin users can access database configuration and user-management sections in settings. | - -### Settings - -| Section | Description | -| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Browser settings | Query execution defaults and limits: timeout, result limit, run default query on load. User experience: content persistence (auto-save and restore), display-text priority (controls which node property appears as the node caption). Graph info refresh interval. Tutorial replay. | -| DB configurations (Admin) | View and update server configuration values. Some runtime configurations remain read-only. | -| Users (Admin) | List users and adjust roles. Add and delete users. | -| Personal Access Tokens | Generate tokens (with optional expiration). Tokens appear once at creation (copy-to-clipboard UX). Revoke existing tokens. | - - ---- - -## Common Workflows - -### Running and visualizing queries - -| Step | Action | -| ---- | -------------------------------------------------------------------------------------- | -| 1 | Go to Graphs and select a graph. | -| 2 | Write a Cypher query in the editor and run it. | -| 3 | Inspect results in the Graph tab (interactive canvas) or Table tab (rows and columns). 
| -| 4 | Use Labels and Relationships toggles to focus the canvas. | - -[NEED GIF HERE] - -### Inspecting and editing elements - -| Step | Action | -| ---- | -------------------------------------------------------------------------------------- | -| 1 | Click a node or edge in the canvas. | -| 2 | Use the Data panel to inspect properties and apply create or delete actions as needed. | - -### Working with query history - -| Step | Action | -| ---- | ---------------------------------------------------------------------- | -| 1 | Open Query History and filter by graph or search for a previous query. | -| 2 | Select a query and review Metadata, Explain, or Profile. | - -[NEED GIF HERE] - -### Exporting graph data - -| Step | Action | -| ---- | ----------------------------------------------- | -| 1 | Open graph management and select a graph. | -| 2 | Click Export Data to download a `.dump` file. | - -[NEED GIF HERE] - diff --git a/website/docs/cloud/enterprise-tier.md b/website/docs/cloud/enterprise-tier.md deleted file mode 100644 index 5c016e3..0000000 --- a/website/docs/cloud/enterprise-tier.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Enterprise Tier -description: FalkorDB DBaaS Enterprise Tier -sidebar_position: 4 -sidebar_label: Enterprise Tier ---- - -# Enterprise Tier - -![FalkorDB Cloud Enterprise Tier Banner](https://github.com/user-attachments/assets/f03bb001-1916-4e0f-9a82-cb8271309855) - -FalkorDB's **Enterprise Tier** is designed for the most demanding workloads, offering ultimate performance, scale, and customization, with pricing determined on a **Custom** basis. This tier includes **every available enterprise feature**, such as **VPC Peering**, **Advanced Monitoring**, and **Dedicated Account Manager** support. - -The Enterprise Tier is fully optimized for mission-critical applications, providing the highest levels of security, availability, and dedicated operational support. Deployment configurations are tailored to your specific infrastructure, scale, and compliance requirements. - -## FalkorDB Pricing Plans Comparison - -| Feature | FREE | STARTUP | PRO | ENTERPRISE | -| :--- | :---: | :---: | :---: | :---: | -| **Monthly Cost (from)** | **Free** | **$73** | **$350** | **Custom** | -| Multi-Graph / Multi-Tenancy | βœ“ | βœ“ | βœ“ | **🟒** | -| Graph Access Control | βœ“ | βœ“ | βœ“ | **🟒** | -| **TLS** | βœ— | βœ“ | βœ“ | **🟒** | -| **VPC** | βœ— | βœ— | βœ— | **🟒** | -| Cluster Deployment | βœ— | βœ— | βœ“ | **🟒** | -| High Availability | βœ— | βœ— | βœ“ | **🟒** | -| Multi-zone Deployment | βœ— | βœ— | βœ“ | **🟒** | -| Scalability | βœ— | βœ— | βœ“ | **🟒** | -| Continuous Persistence | βœ— | βœ— | βœ“ | **🟒** | -| **Automated Backups** | βœ— | Every 12 Hours | Every 12 Hours | **Every Hour** | -| **Advanced Monitoring** | βœ— | βœ— | βœ— | **🟒** | -| **Support** | Community | Community | 24/7 | **Dedicated** | -| **Dedicated Account Manager** | βœ— | βœ— | βœ— | **🟒** | -| **Cloud Providers** | AWS, GCP, Azure | AWS, GCP, Azure | AWS, GCP, Azure | **AWS, GCP, Azure** | - -## Terms -### Consultation and Pricing -> The Enterprise Tier is customized to your specific needs, leveraging dedicated resources, tailored support SLAs, and private deployment options. Pricing is calculated based on the custom configuration of cores, memory, and additional enterprise components. 
-> -> **For precise pricing, deployment details, and a dedicated consultation:** -> **[Contact our Sales Team](https://www.falkordb.com/get-a-demo/)** -> -> ⚠️ Prices are subject to change - -## Getting Started - - - FalkorDB Graph DBaaS Enterprise Tier Tutorial Video - - -βš™οΈ To begin your Enterprise journey, schedule a consultation: -[![Contact Us](https://img.shields.io/badge/Contact%20Us-8A2BE2?style=for-the-badge)](mailto:info@falkordb.com) diff --git a/website/docs/cloud/features.md b/website/docs/cloud/features.md deleted file mode 100644 index db03f2b..0000000 --- a/website/docs/cloud/features.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Features -description: FalkorDB DBaaS Features -sidebar_position: 5 -sidebar_label: Features ---- - -# Features - -## Multi-Tenancy -Multi-tenancy lets you run multiple isolated graph databases within a single FalkorDB instance. Each tenant operates independently with its own data, queries, and access controls while sharing the underlying infrastructure. - -Developers building SaaS applications need multi-tenancy to serve multiple customers without deploying separate database instances for each one. This approach reduces operational overhead and infrastructure costs while maintaining strict data isolation between tenants. - -In practice, you create distinct graph databases for each customer or project, and FalkorDB handles the isolation automatically. - -## Cloud Providers -### AWS -FalkorDB runs on Amazon Web Services infrastructure, giving you access to AWS's global network of data centers and integration with other AWS services. You can deploy FalkorDB instances in several AWS regions and connect them to your existing AWS resources. - -Teams already using AWS benefit from keeping their graph database in the same cloud environment as their applications. This setup reduces latency and simplifies network configuration since your services communicate within the AWS network. - -When you deploy on AWS, you choose your preferred region, and FalkorDB provisions the necessary compute and storage resources in that location. - -### Google Cloud Platform (GCP) -FalkorDB integrates with Google Cloud Platform, allowing you to run graph databases on Google's infrastructure. You gain access to GCP's global network and can combine FalkorDB with other Google Cloud services. - -Organizations using GCP for their applications should deploy FalkorDB in the same cloud to maintain consistent infrastructure management. Keeping your database and applications on GCP reduces cross-cloud data transfer costs and latency. - -You select a GCP region during deployment, and FalkorDB sets up your graph database instance within Google's infrastructure. - -> Note: Microsoft Azure is currently available in a Bring-Your-Own-Cloud configuration - -## TLS -TLS (Transport Layer Security) encrypts all data transmitted between your application and FalkorDB. This encryption prevents anyone intercepting network traffic from reading your queries or results. - -Applications handling sensitive data must use TLS to protect information in transit. Without encryption, credentials, personal data, and business logic become vulnerable when traveling across networks. - -When you enable TLS, FalkorDB requires encrypted connections. Your application must configure its database client to use TLS, and all communication happens over secure channels. - -## VPC -A Virtual Private Cloud (VPC) creates an isolated network environment where your FalkorDB instance runs separately from the public internet. 
Only resources within your VPC or those you explicitly authorize can reach your database. - -Organizations with security requirements need VPC deployment to control network access to their databases. VPCs prevent unauthorized connection attempts and give you granular control over which services can communicate with FalkorDB. - -You deploy FalkorDB into your existing VPC, and the database becomes accessible only through your private network. Your applications connect using private IP addresses instead of public endpoints. - -## Persistence -Persistence ensures your graph data survives system restarts, crashes, or failures by writing changes to disk. Without persistence, you lose all data when the database stops. - -Any application storing important data requires persistence to maintain durability. In-memory-only databases lose everything during unexpected shutdowns, making them unsuitable for production workloads. - -FalkorDB persists data through regular snapshots and transaction logs. These mechanisms guarantee that committed transactions remain safe even if the system crashes immediately afterward. - -## Graph Browser -You can connect to the falkordb browser (integrated into your web browser) from the cloud console. The browser allows visualizing query results, allows you to traverse the graph and more. Multi Graph support is enabled by default in the browser which simplifies navigation and data management. - -### Solution Architecture -Solution architecture support helps you design how FalkorDB integrates with your broader application infrastructure. This guidance covers connection patterns, data modeling approaches, and best practices for specific use cases. - -Teams building complex systems benefit from architectural advice to avoid common pitfalls and optimize their graph database implementation. Poor architectural decisions early in development create technical debt that becomes expensive to fix later. - -Architecture consultations provide recommendations on graph schema design, query optimization strategies, and integration patterns that match your application requirements. diff --git a/website/docs/cloud/free-tier.md b/website/docs/cloud/free-tier.md deleted file mode 100644 index 6982dfc..0000000 --- a/website/docs/cloud/free-tier.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Free Tier -description: FalkorDB DBaaS Free Tier -sidebar_position: 1 -sidebar_label: Free Tier ---- - -# Free Tier - -![FalkorDB Cloud Free Tier Banner](https://github.com/user-attachments/assets/062cb5c5-d969-4481-ab1b-1802fea0732a) - -FalkorDB's free cloud tier gives you instant access to a graph database with multi-graph support and multi-tenancy capabilities. You can deploy on AWS or GCP with 100MB of storage and rely on community support to get started. - -The free tier provides everything you need to explore FalkorDB and build initial prototypes. When your application grows and requires TLS security, VPC networking, high availability, automated backups, or dedicated support, you can upgrade to a paid plan that includes these enterprise features. 
- -## FalkorDB Pricing Plans Comparison - -| Feature | FREE | STARTUP | PRO | ENTERPRISE | -| :--- | :---: | :---: | :---: | :---: | -| **Monthly Cost (from)** | **Free** | **$73** | **$350** | **Custom** | -| Multi-Graph / Multi-Tenancy | **βœ“** | **βœ“** | **βœ“** | **βœ“** | -| Graph Access Control | **βœ“** | **βœ“** | **βœ“** | **βœ“** | -| TLS | βœ— | **βœ“** | **βœ“** | **βœ“** | -| VPC | βœ— | βœ— | βœ— | **βœ“** | -| Cluster Deployment | βœ— | βœ— | **βœ“** | **βœ“** | -| High Availability | βœ— | βœ— | **βœ“** | **βœ“** | -| Multi-zone Deployment | βœ— | βœ— | **βœ“** | **βœ“** | -| Scalability | βœ— | βœ— | **βœ“** | **βœ“** | -| Continuous Persistence | βœ— | βœ— | **βœ“** | **βœ“** | -| Automated Backups | βœ— | Every 12 Hours | Every 12 Hours | Every Hour | -| Advanced Monitoring | βœ— | βœ— | βœ— | **βœ“** | -| **Support** | Community | Community | 24/7 | Dedicated | -| Dedicated Account Manager | βœ— | βœ— | βœ— | **βœ“** | -| **Cloud Providers** | AWS, GCP | AWS, GCP | AWS, GCP | AWS, GCP, Azure | -| **Call-to-Action** | [Sign up](https://app.falkordb.cloud/signup) | [Sign up](https://app.falkordb.cloud/signup) | [Sign up](https://app.falkordb.cloud/signup) | [Contact Us](mailto:info@falkordb.com) | - -#### Terms -> ⚠️ Free instances that aren't utilized for 1 day will be stopped, and deleted after 7 days. -Need an extension? Speak to [sales](https://www.falkordb.com/get-a-demo/)) - -## Getting Started - -[![FalkorDB Graph DBaaS Free Tier Tutorial Video](https://github.com/user-attachments/assets/56255f72-ff9d-4863-9942-b839257a723c)](https://www.youtube.com/watch?v=z0XO4pb2t5Y) - -βš™οΈ Spin up your first FalkorDB Cloud instance: -[![Sign Up](https://img.shields.io/badge/Sign%20Up-8A2BE2?style=for-the-badge)](https://app.falkordb.cloud/signup) diff --git a/website/docs/cloud/index.md b/website/docs/cloud/index.md deleted file mode 100644 index 873b324..0000000 --- a/website/docs/cloud/index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Cloud DBaaS -description: Cloud Offering -sidebar_position: 10 -sidebar_label: Cloud DBaaS ---- - -# Cloud DBaaS - -![FalkorDB Cloud Banner](https://github.com/user-attachments/assets/e436f01d-d60a-42cf-ac76-7e457180482e) - -Get started with FalkorDB's cloud offering. The platform provides several enterprise features, including multi-tenancy, across all tiers. Browse the available plans and select the one that suits your needs. You can scale and upgrade your deployment when ready. - -## Features & Services - -| Group | Features | -| :--- | :--- | -| **Availability & Resilience** | - High Availability`
<br>`- Multi-zone Deployment`<br>`- Multi-Graph / Multi-Tenancy`<br>`- Automated Backups`<br>`- Continuous Persistence |
-| **Security & Access** | - Graph Access Control`<br>`- TLS`<br>`- VPC Peering |
-| **Deployment & Scaling** | - Dedicated Cluster Deployment`<br>`- Scalability |
-| **Support & Monitoring** | - Dedicated Support`<br>`- Advanced Monitoring`<br>`- Dedicated Account Manager |
-| ☁️ **Cloud Providers** | - AWS`<br>`- GCP`<br>
`- Azure (BYOC) | - -[![Learn More](https://img.shields.io/badge/Learn%20More-8A2BE2?style=for-the-badge)](https://github.com/FalkorDB/docs/edit/Cloud-Docs/cloud/features.md) - ---- - -### Billing & Setup -ℹ️ Prior to subscribing to any of FalkorDB's paid cloud tiers, please set up your billing information here: -> Adding your billing information is an easy, 2-step process: -> 1. Create a billing account ([Link](https://app.falkordb.cloud/billing)) -> 2. Input your billing information - -![FDB-cloud-billing-how-to](https://github.com/user-attachments/assets/d5d6ce47-0bbc-4c71-b5fa-60a43677fb3f) - -## Free Tier - -The FalkorDB Free Tier provides a free FalkorDB instance for evaluation purposes. You can deploy, connect, and share the instance with minimal effort and no maintenance. - -[![Learn More](https://img.shields.io/badge/Learn%20More-8A2BE2?style=for-the-badge)](https://github.com/FalkorDB/docs/cloud/free-tier.md) -[![Watch Demo](https://img.shields.io/badge/Watch%20Demo-black?style=for-the-badge)](https://www.youtube.com/watch?v=z0XO4pb2t5Y) - -## Startup Tier - -The FalkorDB Startup Tier provides a production-ready standalone FalkorDB deployment. Pick your machine size, add a dataset size, and start extracting insights. - -[![Learn More](https://img.shields.io/badge/Learn%20More-8A2BE2?style=for-the-badge)](https://github.com/FalkorDB/docs/cloud/startup-tier.md) -[![Watch Demo](https://img.shields.io/badge/Watch%20Demo-black?style=for-the-badge)](https://www.youtube.com/watch?v=xjpLPoQgo2s) - -## Pro Tier - -The Pro Tier provides a robust, dedicated environment to scale your application, including highly-available setups. - -[![Learn More](https://img.shields.io/badge/Learn%20More-8A2BE2?style=for-the-badge)](https://github.com/FalkorDB/docs/cloud/pro-tier.md) -[![Watch Demo](https://img.shields.io/badge/Watch%20Demo-black?style=for-the-badge)](https://youtu.be/UIzrW9otvYM?si=P1too6QjZ5r9AHtB) - -## Enterprise - -The Enterprise Tier is fully optimized for mission-critical applications, providing the highest levels of security, availability, and dedicated operational support. Schedule a call with a FalkorDB solutions architect to learn more. - -[![Learn More](https://img.shields.io/badge/Learn%20More-8A2BE2?style=for-the-badge)](https://github.com/FalkorDB/docs/cloud/enterprise-tier.md) -[![Watch Demo](https://img.shields.io/badge/Watch%20Demo-black?style=for-the-badge)](https://youtu.be/fu_8CLFKYSs?si=G7K6dN1i5tyqXTfC) diff --git a/website/docs/cloud/pro-tier.md b/website/docs/cloud/pro-tier.md deleted file mode 100644 index 410c625..0000000 --- a/website/docs/cloud/pro-tier.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Pro Tier -description: FalkorDB DBaaS Pro Tier -sidebar_position: 3 -sidebar_label: Pro Tier ---- - -# Pro Tier - -![FalkorDB Cloud Pro Tier Banner](https://github.com/user-attachments/assets/2d39df96-f932-4cba-a124-bfff93f9a0ca) - -FalkorDB's **Pro Tier** is your solution for high-performance, production-ready graph database workloads, starting at **$350/Month**. This tier is designed for applications requiring **High Availability (HA)**, **Multi-zone Deployment**, and robust **Scalability**. It includes essential infrastructure features like **Cluster Deployment** and **Continuous Persistence (AOF + Snapshot)**, backed by **24-hour Dedicated Support**. - -The Pro Tier provides a robust environment to scale your application with confidence. 
When your needs extend to features like VPC Peering, Advanced Monitoring, or a Dedicated Account Manager, you can easily upgrade to the Enterprise plan. - -## FalkorDB Pricing Plans Comparison - -| Feature | FREE | STARTUP | PRO | ENTERPRISE | -| :--- | :---: | :---: | :---: | :---: | -| **Monthly Cost (from)** | **Free** | **$73** | **$350** | **Custom** | -| Multi-Graph / Multi-Tenancy | βœ“ | βœ“ | **🟒** | βœ“ | -| Graph Access Control | βœ“ | βœ“ | **🟒** | βœ“ | -| **TLS** | βœ— | βœ“ | **🟒** | βœ“ | -| VPC | βœ— | βœ— | **πŸ”΄** | βœ“ | -| Cluster Deployment | βœ— | βœ— | **🟒** | βœ“ | -| High Availability | βœ— | βœ— | **🟒** | βœ“ | -| Multi-zone Deployment | βœ— | βœ— | **🟒** | βœ“ | -| Scalability | βœ— | βœ— | **🟒** | βœ“ | -| Continuous Persistence | βœ— | βœ— | **🟒** | βœ“ | -| **Automated Backups** | βœ— | Every 12 Hours | **Every 12 Hours** | Every Hour | -| Advanced Monitoring | βœ— | βœ— | **πŸ”΄** | βœ“ | -| **Support** | Community | Community | **24/7** | Dedicated | -| Dedicated Account Manager | βœ— | βœ— | **πŸ”΄** | βœ“ | -| **Cloud Providers** | AWS, GCP | AWS, GCP | **AWS, GCP** | AWS, GCP, Azure (BYOC) | -| **Get started** | [Sign up](https://app.falkordb.cloud/signup) | [Sign up](https://app.falkordb.cloud/signup) | [Sign up](https://app.falkordb.cloud/signup) | [Contact Us](mailto:info@falkordb.com) | - -## Terms -### Pricing Calculation -> We charge deployments based on **Core/Hour** and **Memory GB/Hour** usage. You pay **$0.200 per Core/Hour** and **$0.01 per Memory GB/Hour**. - -## Standalone - -| Instance Type | Monthly Cost | -| :--- | ---: | -| E2-standard-2 / m6i.large (Starting Instance) | $350.40 | -| E2-standard-4 / m6i.xlarge | $700.80 | -| E2-custom-4-8192 / c6i.xlarge | $642.40 | -| E2-custom-8-16384 / c6i.2xlarge | $1,284.80 | -| E2-custom-16-32768 / c6i.4xlarge | $2,569.60 | -| E2-custom-32-65536 / c6i.8xlarge | $5,139.20 | - -## Replicated (High Availability, Master (x1), Replica (x1)) - - -| Instances Type | Monthly Cost | -| :-------------------------------------------- | -----------: | -| E2-standard-2 / m6i.large (Starting Instance) | \$1,007.40 | -| E2-standard-4 / m6i.xlarge | \$1,708.20 | -| E2-custom-4-8192 / c6i.xlarge | \$1,591.40 | -| E2-custom-8-16384 / c6i.2xlarge | \$2,876.20 | -| E2-custom-16-32768 / c6i.4xlarge | \$5,445.80 | -| E2-custom-32-65536 / c6i.8xlarge | \$10,585.00 | - -> Note: We charge an additional 2 cores and 2 GB for replication and cluster since they require an extra component (sentinel for replication and rebalancer for cluster). - ---- - - -> Use our **[graph size calculator](https://www.falkordb.com/graph-database-graph-size-calculator/)** to further estimate your cost. 
-> ⚠️ Prices are subject to change - -## Getting Started - - - FalkorDB Graph DBaaS Pro Tier Tutorial Video - - -βš™οΈ Spin up your first FalkorDB Cloud instance: -[![Sign Up](https://img.shields.io/badge/Sign%20Up-8A2BE2?style=for-the-badge)](https://app.falkordb.cloud/signup) diff --git a/website/docs/cloud/startup-tier.md b/website/docs/cloud/startup-tier.md deleted file mode 100644 index f38967c..0000000 --- a/website/docs/cloud/startup-tier.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Startup Tier -description: FalkorDB DBaaS Startup Tier -sidebar_position: 2 -sidebar_label: Startup Tier ---- - -# Startup Tier - -![FalkorDB Cloud Startup Tier Banner](https://github.com/user-attachments/assets/a60eacb7-2af6-432e-84c8-7c3dbe98422c) - -FalkorDB's **Startup Tier** gives you instant access to a production-ready graph database starting at **$73/Month**. This tier is designed to help you **Build a Powerful MVP** with standalone deployment, multi-graph support, and multi-tenancy capabilities. You can deploy on AWS, GCP, or Azure (BYOC) and rely on community support to grow your application. - -The Startup Tier includes essential features like **TLS** and **Automated Backups (Every 12 Hours)**, making it a robust, secure choice for your first production workload. When your application requires High Availability, dedicated support, or advanced enterprise features like VPC networking, you can easily upgrade to a Pro or Enterprise plan. - -## FalkorDB Pricing Plans Comparison - -| Feature | FREE | STARTUP | PRO | ENTERPRISE | -| :--- | :---: | :---: | :---: | :---: | -| **Monthly Cost (from)** | **Free** | **$73** | **$350** | **Custom** | -| Multi-Graph / Multi-Tenancy | βœ“ | **🟒** | βœ“ | βœ“ | -| Graph Access Control | βœ“ | **🟒** | βœ“ | βœ“ | -| **TLS** | βœ— | **🟒** | βœ“ | βœ“ | -| VPC | βœ— | **πŸ”΄** | βœ— | βœ“ | -| Cluster Deployment | βœ— | **πŸ”΄** | βœ“ | βœ“ | -| High Availability | βœ— | **πŸ”΄** | βœ“ | βœ“ | -| Multi-zone Deployment | βœ— | **πŸ”΄** | βœ“ | βœ“ | -| Scalability | βœ— | **πŸ”΄** | βœ“ | βœ“ | -| Continuous Persistence | βœ— | **πŸ”΄** | βœ“ | βœ“ | -| **Automated Backups** | βœ— | **Every 12 Hours** | Every 12 Hours | Every Hour | -| Advanced Monitoring | βœ— | **πŸ”΄** | βœ— | βœ“ | -| **Support** | Community | **Community** | 24/7 | Dedicated | -| Dedicated Account Manager | βœ— | **πŸ”΄** | βœ— | βœ“ | -| **Cloud Providers** | AWS, GCP | **AWS, GCP** | AWS, GCP | AWS, GCP, Azure (BYOC) | -| **Call-to-Action** | [Sign up](https://app.falkordb.cloud/signup) | [Sign up](https://app.falkordb.cloud/signup) | [Sign up](https://app.falkordb.cloud/signup) | [Contact Us](mailto:info@falkordb.com) | - -## Terms -### Pricing Calculation -> We calculate deployment costs based on **Memory GB/Hour** usage. Each Memory GB/Hour costs **$0.100**. -> -> The list below shows approximate monthly costs for different instance sizes (based on 730 hours/month): -> -> * **1 Gigabyte memory instance:** $0.100 Γ— 1 (Memory GB) Γ— 730 hours = **$73/month*** -> * **2 Gigabyte memory instance:** $0.100 Γ— 2 (Memory GB) Γ— 730 hours = **$146/month*** -> -> You can estimate your monthly costs by multiplying your instance's memory allocation (in GB) by **$73**. -> -> Use our **[graph size calculator](https://www.falkordb.com/graph-database-graph-size-calculator/)** to further estimate your cost. 
-> -> ⚠️ Prices are subject to change - -## Getting Started - - - FalkorDB Graph DBaaS Startup Tier Tutorial Video - - -βš™οΈ Spin up your first FalkorDB Cloud instance: -[![Sign Up](https://img.shields.io/badge/Sign%20Up-8A2BE2?style=for-the-badge)](https://app.falkordb.cloud/signup) diff --git a/website/docs/commands/acl.md b/website/docs/commands/acl.md deleted file mode 100644 index 69537aa..0000000 --- a/website/docs/commands/acl.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: ACL -description: > -sidebar_position: 100 -sidebar_label: ACL ---- - - - -# ACL -The ACL command in FalkorDB provides tools for managing Access Control Lists, -enabling administrators to control user permissions at a granular level. -This command is crucial for maintaining secure access to your FalkorDB instances. - -Usage: `ACL [SUBCOMMAND] [arg1] [arg2] ...` - -## Subcommands - -### ACL HELP - -Returns a list of all available `ACL` subcommands and their syntax. - -Usage: `ACL HELP` - -#### Example - -``` -> ACL HELP -``` - -#### Output - -``` -1) "GETUSER" -2) "SETUSER" -3) "DELUSER" -4) "LIST" -... -``` - -### ACL SETUSER - -Defines or updates a user's permissions. - -Usage: `ACL SETUSER [rule1] [rule2] ...` - -#### Rules - - * on / off: Enables or disables the user account. - * nopass: Allows access without a password. - * password:``: Sets a password for the user. - * ~``: Restricts access to graphs matching the given pattern. - * +``: Grants permission to execute specific commands. - * -``: Denies permission to execute specific commands. - -#### Example - -``` -> ACL SETUSER john on >password123 +GRAPH.LIST +GRAPH.RO_QUERY ~* -``` - -### ACL GETUSER - -Retrieves details about a specific user, including permissions and settings. -Syntax - -Usage: `ACL GETUSER ` - -#### Example - -``` -> ACL GETUSER john -``` - -#### Output - -``` -1) "on" -2) ">password123" -3) "+GRAPH.LIST" -4) "+GRAPH.RO_QUERY" -5) "~*" -``` - -### ACL DELUSER - -Deletes a user from the ACL. - -Usage: `ACL DELUSER ` - -#### Example - -``` -> ACL DELUSER john -``` - -### ACL LIST - -Lists all users currently configured in the ACL. - -Usage: `ACL LIST` - -#### Example - -``` -> ACL LIST -``` - -#### Output - -``` -1) "admin" -2) "john" -3) "guest" -``` - -### ACL LOG - -Displays a log of recent ACL-related events, such as user authentication attempts or rule changes. - -Usage: `ACL LOG [count]` - - * count: (Optional) Limits the number of entries in the log. - -#### Example - -``` -> ACL LOG 10 -``` - -## Notes - - The ACL command is available only to users with administrative privileges. - Be cautious when using the nopass rule, as it may compromise security. - Use specific patterns and commands to enforce the principle of least privilege. diff --git a/website/docs/commands/graph.config-get.mdx b/website/docs/commands/graph.config-get.mdx deleted file mode 100644 index 4e53026..0000000 --- a/website/docs/commands/graph.config-get.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: GRAPH.CONFIG-GET -description: Retrieve FalkorDB configuration parameters -sidebar_label: GRAPH.CONFIG-GET ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.CONFIG-GET - -Retrieves the current value of a FalkorDB configuration parameter. - -FalkorDB configuration parameters are detailed [here](/getting-started/configuration). - -`*` can be used to retrieve the value of all FalkorDB configuration parameters. 
- - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -client = FalkorDB() -config = client.get_config('*') -print(config) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const client = await FalkorDB.connect(); -const config = await client.getConfig('*'); -console.log(config); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -let config = client.get_config("*")?; -println!("{:?}", config); -``` - - - - -```java -FalkorDB client = new FalkorDB(); -Map config = client.getConfig("*"); -System.out.println(config); -``` - - - - -```bash -graph.config get * -# Output: -# 1) 1) "TIMEOUT" -# 2) (integer) 0 -# ... -``` - - - - - - - - - - - - - - - - - - - -```python -timeout = client.get_config('TIMEOUT') -print(timeout) -``` - - - - -```javascript -const timeout = await client.getConfig('TIMEOUT'); -console.log(timeout); -``` - - - - -```rust -let timeout = client.get_config("TIMEOUT")?; -println!("{:?}", timeout); -``` - - - - -```java -Object timeout = client.getConfig("TIMEOUT"); -System.out.println(timeout); -``` - - - - -```bash -graph.config get TIMEOUT -# Output: -# 1) "TIMEOUT" -# 2) (integer) 0 -``` - - - diff --git a/website/docs/commands/graph.config-set.mdx b/website/docs/commands/graph.config-set.mdx deleted file mode 100644 index 9746a56..0000000 --- a/website/docs/commands/graph.config-set.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: GRAPH.CONFIG-SET -description: Set FalkorDB configuration parameters -sidebar_label: GRAPH.CONFIG-SET ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.CONFIG-SET - -Set the value of a FalkorDB configuration parameter. - -Values set using `GRAPH.CONFIG SET` are not persisted after server restart. - -FalkorDB configuration parameters are detailed [here](/getting-started/configuration). - -Note: As detailed in the link above, not all FalkorDB configuration parameters can be set at run-time. 
- - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -client = FalkorDB() -print(client.get_config('TIMEOUT')) -client.set_config('TIMEOUT', 10000) -print(client.get_config('TIMEOUT')) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const client = await FalkorDB.connect(); -console.log(await client.getConfig('TIMEOUT')); -await client.setConfig('TIMEOUT', 10000); -console.log(await client.getConfig('TIMEOUT')); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -println!("{:?}", client.get_config("TIMEOUT")?); -client.set_config("TIMEOUT", 10000)?; -println!("{:?}", client.get_config("TIMEOUT")?); -``` - - - - -```java -FalkorDB client = new FalkorDB(); -System.out.println(client.getConfig("TIMEOUT")); -client.setConfig("TIMEOUT", 10000); -System.out.println(client.getConfig("TIMEOUT")); -``` - - - - -```bash -graph.config get TIMEOUT -graph.config set TIMEOUT 10000 -graph.config get TIMEOUT -# Output: -# 1) "TIMEOUT" -# 2) (integer) 0 -# OK -# 1) "TIMEOUT" -# 2) (integer) 10000 -``` - - - - - - - - - - - - - - - - - - - -```python -try: - client.set_config('THREAD_COUNT', 10) -except Exception as e: - print(e) -``` - - - - -```javascript -try { - await client.setConfig('THREAD_COUNT', 10); -} catch (e) { - console.error(e); -} -``` - - - - -```rust -if let Err(e) = client.set_config("THREAD_COUNT", 10) { - println!("{}", e); -} -``` - - - - -```java -try { - client.setConfig("THREAD_COUNT", 10); -} catch (Exception e) { - System.out.println(e); -} -``` - - - - -```bash -graph.config set THREAD_COUNT 10 -# Output: -# (error) This configuration parameter cannot be set at run-time -``` - - - diff --git a/website/docs/commands/graph.constraint-create.mdx b/website/docs/commands/graph.constraint-create.mdx deleted file mode 100644 index 25c6f26..0000000 --- a/website/docs/commands/graph.constraint-create.mdx +++ /dev/null @@ -1,333 +0,0 @@ ---- -title: GRAPH.CONSTRAINT CREATE -description: Create constraints on graph entities -sidebar_label: GRAPH.CONSTRAINT CREATE ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.CONSTRAINT CREATE - ---- -syntax: | - GRAPH.CONSTRAINT CREATE key - MANDATORY|UNIQUE - NODE label | RELATIONSHIP reltype - PROPERTIES propCount prop [prop...] ---- - -Creates a graph constraint. - -[Examples](#examples) - -## Introduction to constraints - -A constraint is a rule enforced on graph nodes or relationships, used to guarantee a certain structure of the data. - -FalkorDB supports two types of constraints: - -1. Mandatory constraints -2. Unique constraints - -### Mandatory constraints - -A mandatory constraint enforces existence of given attributes for all nodes with a given label or for all edges with a given relationship-type. - -Consider a mandatory constraint over the attribute `id` of all nodes with the label `Person`. -This constraint will enforce that any `Person` node in the graph has an `id` attribute. -Any attempt to create or modify a `Person` node, such that the resulting node does not have an `id` attribute, will fail. - -### Unique constraints - -A unique constraint enforces uniqueness of values of a given set of attributes for all nodes with a given label or for all edges with a given relationship-type. I.e., no duplicates are allowed. - -Consider a unique constraint over the attributes: `first_name` and `last_name` of all nodes with the label `Person` -This constraint will enforce that any combination of `first_name`, `last_name` is unique. 
-E.g., a graph can contain the following `Person` nodes: - -```sql -(:Person {first_name:'Frank', last_name:'Costanza'}) -(:Person {first_name:'Estelle', last_name:'Costanza'}) -``` - -But trying to create a third node with `first_name` Frank and `last_name` Costanza, will issue an error and the query will fail. - -> **Notes:** -> -> - A unique constraint requires the existence of an exact-match index prior to its creation. For example, trying to create a unique constraint governing attributes: `first_name` and `last_name` of nodes with label `Person` without having an exact-match index over `Person`'s `first_name` and `last_name` attributes will fail. -> -> - A unique constraint is enforced for a given node or edge only if all the constrained properties are defined (non-null). -> - Unique constraints are not enforced for array-valued properties. -- Trying to delete an index that supports a constraint will fail. - - -## Creating a constraint - -To create a constraint, use the `GRAPH.CONSTRAINT CREATE` command as follows: - -```sh -GRAPH.CONSTRAINT CREATE key constraintType {NODE label | RELATIONSHIP reltype} PROPERTIES propCount prop [prop...] -``` - -## Required arguments - -
-`key`
-
-is the key name for the graph.
-
-`constraintType`
-
-is the constraint type: either `MANDATORY` or `UNIQUE`.
-
-`NODE label | RELATIONSHIP reltype`
-
-is the graph entity type (`NODE` or `RELATIONSHIP`) and the name of the node label or relationship type on which the constraint should be enforced.
-
-`propCount`
-
-is the number of properties that follow. Valid values are between 1 and 255.
-
-`prop...`
-
-is a list of `propCount` property names.
- -:::note - -**Notes:** - -- Constraints are created asynchronously. The constraint creation command will reply with `PENDING` and the newly created constraint is enforced gradually on all relevant nodes or relationships. - During its creation phase, a constraint's status is `UNDER CONSTRUCTION`. When all governed nodes or relationships confirm to the constraint - its status is updated to `OPERATIONAL`, otherwise, if a conflict is detected, the constraint status is updated to `FAILED` and the constraint is not enforced. The caller may try to resolve the conflict and recreate the constraint. To retrieve the status of all constraints - use the `db.constraints()` procedure. -- A constraint creation command may fail synchronously due to the following reasons: - 1. Syntax error - 2. Constraint already exists - 3. Missing supporting index (for unique constraint) - - In addition, a constraint creation command may fail asynchronously due to the following reasons: - - 1. The graph contains data which violates the constraint - -::: - -## Return value - -@simple-string-reply - `PENDING` if executed correctly and the constraint is being created asynchronously, or @error-reply otherwise. - -## Examples - -### Creating a unique constraint for a node label - -To create a unique constraint for all nodes with label `Person` enforcing uniqueness on the combination of values of attributes `first_name` and `last_name`, issue the following commands: - - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -client = FalkorDB() -graph = client.select_graph('g') -graph.query("CREATE INDEX FOR (p:Person) ON (p.first_name, p.last_name)") -result = client.create_constraint('g', 'UNIQUE', 'NODE', 'Person', ['first_name', 'last_name']) -print(result) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const client = await FalkorDB.connect(); -const graph = client.selectGraph('g'); -await graph.query("CREATE INDEX FOR (p:Person) ON (p.first_name, p.last_name)"); -const result = await client.createConstraint('g', 'UNIQUE', 'NODE', 'Person', ['first_name', 'last_name']); -console.log(result); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -let graph = client.select_graph("g"); -graph.query("CREATE INDEX FOR (p:Person) ON (p.first_name, p.last_name)")?; -let result = client.create_constraint("g", "UNIQUE", "NODE", "Person", &["first_name", "last_name"])?; -println!("{}", result); -``` - - - - -```java -FalkorDB client = new FalkorDB(); -Graph graph = client.selectGraph("g"); -graph.query("CREATE INDEX FOR (p:Person) ON (p.first_name, p.last_name)"); -String result = client.createConstraint("g", "UNIQUE", "NODE", "Person", Arrays.asList("first_name", "last_name")); -System.out.println(result); -``` - - - - -```bash -redis> GRAPH.QUERY g "CREATE INDEX FOR (p:Person) ON (p.first_name, p.last_name)" -redis> GRAPH.CONSTRAINT CREATE g UNIQUE NODE Person PROPERTIES 2 first_name last_name -# Output: PENDING -``` - - - - - -### Creating a mandatory constraint for a relationship type - -To create a mandatory constraint for all edges with relationship-type `Visited`, enforcing the existence of a `date` attribute, issue the following command: - - - - - - - - - - - - - - - -```python -result = client.create_constraint('g', 'MANDATORY', 'RELATIONSHIP', 'Visited', ['date']) -print(result) -``` - - - - -```javascript -const result = await client.createConstraint('g', 'MANDATORY', 'RELATIONSHIP', 'Visited', ['date']); -console.log(result); -``` - - - - -```rust -let result = 
client.create_constraint("g", "MANDATORY", "RELATIONSHIP", "Visited", &["date"])?; -println!("{}", result); -``` - - - - -```java -String result = client.createConstraint("g", "MANDATORY", "RELATIONSHIP", "Visited", Arrays.asList("date")); -System.out.println(result); -``` - - - - -```bash -redis> GRAPH.CONSTRAINT CREATE g MANDATORY RELATIONSHIP Visited PROPERTIES 1 date -# Output: PENDING -``` - - - - - -### Listing constraints - -To list all constraints enforced on a given graph, use the `db.constraints` procedure: - - - - - - - - - - - - - - - -```python -result = graph.ro_query("call db.constraints()") -print(result) -``` - - - - -```javascript -const result = await graph.ro_query("call db.constraints()"); -console.log(result); -``` - - - - -```rust -let result = graph.ro_query("call db.constraints()")?; -println!("{:?}", result); -``` - - - - -```java -ResultSet result = graph.ro_query("call db.constraints()"); -System.out.println(result); -``` - - - - -```bash -redis> GRAPH.RO_QUERY g "call db.constraints()" -# Output: ... -``` - - - - - -## Deleting a constraint - -See [GRAPH.CONSTRAINT DROP](/commands/graph.constraint-drop) diff --git a/website/docs/commands/graph.constraint-drop.mdx b/website/docs/commands/graph.constraint-drop.mdx deleted file mode 100644 index 6ed73f4..0000000 --- a/website/docs/commands/graph.constraint-drop.mdx +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: GRAPH.CONSTRAINT DROP -description: Drop constraints from graph entities -sidebar_label: GRAPH.CONSTRAINT DROP ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.CONSTRAINT DROP - ---- -syntax: | - GRAPH.CONSTRAINT DROP key - MANDATORY|UNIQUE - NODE label | RELATIONSHIP reltype - PROPERTIES propCount prop [prop...] ---- - -Deleted a graph constraint. - -[Examples](#examples) - -For an introduction to constraints see [GRAPH.CONSTRAINT CREATE](/commands/graph.constraint-create) - -## Required arguments - -
-key - -is key name for the graph. - -
- -
-constraintType - -is the constraint type: either `MANDATORY` or `UNIQUE`. - - -
- -
-NODE label | RELATIONSHIP reltype - -is the graph entity type (`NODE` or `RELATIONSHIP`) and the name of the node label or relationship type on which the constraint is enforced. - - -
- -
-propCount - -is the number of properties following. Valid values are between 1 and 255. - - -
- -
-prop... - -is a list of `propCount` property names. - - -
- -## Return value - -@simple-string-reply - `OK` if executed correctly, or @error-reply otherwise. - -## Examples - -To delete a unique constraint for all nodes with label `Person` enforcing uniqueness on the combination of values of attributes `first_name` and `last_name`, issue the following command: - - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -client = FalkorDB() -result = client.drop_constraint('g', 'UNIQUE', 'NODE', 'Person', ['first_name', 'last_name']) -print(result) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const client = await FalkorDB.connect(); -const result = await client.dropConstraint('g', 'UNIQUE', 'NODE', 'Person', ['first_name', 'last_name']); -console.log(result); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -let result = client.drop_constraint("g", "UNIQUE", "NODE", "Person", &["first_name", "last_name"])?; -println!("{}", result); -``` - - - - -```java -FalkorDB client = new FalkorDB(); -String result = client.dropConstraint("g", "UNIQUE", "NODE", "Person", Arrays.asList("first_name", "last_name")); -System.out.println(result); -``` - - - - -```bash -redis> GRAPH.CONSTRAINT DROP g UNIQUE NODE Person PROPERTIES 2 first_name last_name -# Output: OK -``` - - - diff --git a/website/docs/commands/graph.copy.mdx b/website/docs/commands/graph.copy.mdx deleted file mode 100644 index e15edd3..0000000 --- a/website/docs/commands/graph.copy.mdx +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: GRAPH.COPY -description: Create a copy of a graph -sidebar_label: GRAPH.COPY ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.COPY - -Usage: `GRAPH.COPY ` - -The `GRAPH.COPY` command creates a copy of a graph, while the copy is performed the `src` graph is fully accessible. 
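-
-Because the source graph remains readable for the duration of the copy, read queries can keep running against `src` while `GRAPH.COPY` executes. The sketch below illustrates this with the Python client; the graph names and the background thread are illustrative assumptions (timing is not guaranteed), and the basic end-to-end usage follows in the example below.
-
-```python
-import threading
-from falkordb import FalkorDB
-
-db = FalkorDB(host='localhost', port=6379)
-src = db.select_graph('A')
-
-# Start the copy in a background thread (illustrative only).
-copier = threading.Thread(target=lambda: src.copy('Z'))
-copier.start()
-
-# While the copy is in progress, the source graph still serves reads
-# (depending on timing, this read may land during or after the copy).
-result = src.ro_query('MATCH (a:Account) RETURN count(a)')
-print(result)
-
-copier.join()
-```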
- -Example: - - - - - - - - - - - - - - - -```python -# Graphs list is empty -graph_list = db.list() - -# Create Graph 'A' -graph_a = db.select_graph('A') -result = graph_a.query('CREATE (:Account {number: 516637})') - -# Copy Graph 'A' to 'Z' -graph_z = graph_a.copy('Z') - -# Graphs list including 'A' and 'Z' -graph_list = db.list() - -# Query Graph 'Z' -result = graph_z.query('MATCH (a:Account) RETURN a.number')Query Graph 'Z' -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; - -const client = await FalkorDB.connect(); - -// Create Graph 'A' -const graphA = client.selectGraph('A'); -await graphA.query("CREATE (:Account {number: 516637})"); - -// Copy Graph 'A' to 'Z' -await client.copyGraph('A', 'Z'); - -// Query Graph 'Z' -const graphZ = client.selectGraph('Z'); -const result = await graphZ.query("MATCH (a:Account) RETURN a.number"); -console.log(result); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -let graph_a = client.select_graph("A"); - -graph_a.query("CREATE (:Account {number: 516637})")?; -client.copy_graph("A", "Z")?; - -let graph_z = client.select_graph("Z"); -let result = graph_z.query("MATCH (a:Account) RETURN a.number")?; -println!("{:?}", result); -``` - - - - -```java -FalkorDB client = new FalkorDB(); - -// Create Graph 'A' -Graph graphA = client.selectGraph("A"); -graphA.query("CREATE (:Account {number: 516637})"); - -// Copy Graph 'A' to 'Z' -client.copyGraph("A", "Z"); -Graph graphZ = client.selectGraph("Z"); - -// Query Graph 'Z' -ResultSet result = graphZ.query("MATCH (a:Account) RETURN a.number"); -System.out.println(result); -``` - - - - -```bash -127.0.0.1:6379> GRAPH.LIST -(empty array) -127.0.0.1:6379> GRAPH.QUERY A "CREATE (:Account {number: 516637})" -1) 1) "Labels added: 1" - 2) "Nodes created: 1" - 3) "Properties set: 1" - 4) "Cached execution: 0" - 5) "Query internal execution time: 0.588084 milliseconds" -127.0.0.1:6379> GRAPH.COPY A Z -"OK" -127.0.0.1:6379> GRAPH.LIST -1) "Z" -2) "telemetry{A}" -3) "A" -127.0.0.1:6379> GRAPH.QUERY Z "MATCH (a:Account) RETURN a.number" -1) 1) "a.number" -2) 1) 1) (integer) 516637 -3) 1) "Cached execution: 0" - 2) "Query internal execution time: 0.638375 milliseconds" -``` - - - diff --git a/website/docs/commands/graph.delete.mdx b/website/docs/commands/graph.delete.mdx deleted file mode 100644 index 807d682..0000000 --- a/website/docs/commands/graph.delete.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: GRAPH.DELETE -description: Remove a graph and all its entities -sidebar_position: 3 -sidebar_label: GRAPH.DELETE ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.DELETE - -Completely removes a graph and all of its entities (nodes and relationships). - -## Syntax - -``` -GRAPH.DELETE graph_name -``` - -**Arguments:** -- `graph_name` - Name of the graph to delete - -**Returns:** String indicating if the operation succeeded or failed. 
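-
-To confirm that the key is gone after a delete, you can follow the call with a graph listing. A minimal Python sketch, assuming a graph named `us_government` already exists (per-client delete calls are shown in the examples below):
-
-```python
-from falkordb import FalkorDB
-
-db = FalkorDB(host='localhost', port=6379)
-graph = db.select_graph('us_government')
-
-graph.delete()
-
-# The deleted key should no longer appear in the list of graphs.
-print(db.list_graphs())
-```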
- -## Examples - - - - - - - - - - - - - - - -```python -graph.delete() -``` - - - - -```javascript -await graph.delete(); -``` - - - - -```rust -graph.delete()?; -``` - - - - -```java -graph.delete(); -``` - - - - -```bash -GRAPH.DELETE us_government -``` - - - - - -## Deleting Individual Nodes - -**Note:** To delete specific nodes or relationships (not the entire graph), use the Cypher `DELETE` clause with a `MATCH` query: - - - - - - - - - - - - - - - -```python -graph.query("MATCH (x:Y {propname: propvalue}) DELETE x") -``` - - - - -```javascript -await graph.query("MATCH (x:Y {propname: propvalue}) DELETE x"); -``` - - - - -```rust -graph.query("MATCH (x:Y {propname: propvalue}) DELETE x")?; -``` - - - - -```java -graph.query("MATCH (x:Y {propname: propvalue}) DELETE x"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "MATCH (x:Y {propname: propvalue}) DELETE x" -``` - - - - - -**⚠️ Warning:** When you delete a node using the Cypher `DELETE` clause, all of the node's incoming and outgoing relationships are also automatically removed. diff --git a/website/docs/commands/graph.explain.mdx b/website/docs/commands/graph.explain.mdx deleted file mode 100644 index 6552aef..0000000 --- a/website/docs/commands/graph.explain.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: GRAPH.EXPLAIN -description: Construct and display query execution plans -sidebar_position: 4 -sidebar_label: GRAPH.EXPLAIN ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.EXPLAIN - -Constructs a query execution plan but does not run it. Inspect this execution plan to better -understand how your query will get executed. - -Arguments: `Graph name, Query` - -Returns: `String representation of a query execution plan` - - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -client = FalkorDB() -graph = client.select_graph('us_government') -query = "MATCH (p:President)-[:BORN]->(h:State {name:'Hawaii'}) RETURN p" -result = graph.explain(query) -print(result) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const client = await FalkorDB.connect(); -const graph = client.selectGraph('us_government'); -const query = "MATCH (p:President)-[:BORN]->(h:State {name:'Hawaii'}) RETURN p"; -const result = await graph.explain(query); -console.log(result); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -let graph = client.select_graph("us_government"); -let query = r#"MATCH (p:President)-[:BORN]->(h:State {name:'Hawaii'}) RETURN p"#; -let result = graph.explain(query)?; -println!("{}", result); -``` - - - - -```java -FalkorDB client = new FalkorDB(); -Graph graph = client.selectGraph("us_government"); -String query = "MATCH (p:President)-[:BORN]->(h:State {name:'Hawaii'}) RETURN p"; -String result = graph.explain(query); -System.out.println(result); -``` - - - - -```bash -GRAPH.EXPLAIN us_government "MATCH (p:President)-[:BORN]->(h:State {name:'Hawaii'}) RETURN p" -``` - - - diff --git a/website/docs/commands/graph.info.md b/website/docs/commands/graph.info.md deleted file mode 100644 index 8d98328..0000000 --- a/website/docs/commands/graph.info.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: GRAPH.INFO -description: > -sidebar_label: GRAPH.INFO ---- - - - -# GRAPH.INFO -Returns information and statistics about currently running and waiting queries. - -## Syntax - -``` -GRAPH.INFO [RunningQueries | WaitingQueries] -``` - -If no argument is provided, both running and waiting queries are returned. 
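-
-Client libraries may not expose a dedicated wrapper for `GRAPH.INFO`, so one option is a generic command call. Below is a minimal sketch using redis-py's `execute_command`; the connection details are assumptions, and raw command examples follow in the Examples section.
-
-```python
-import redis
-
-r = redis.Redis(host='localhost', port=6379, decode_responses=True)
-
-# With no section argument, both running and waiting queries are returned.
-print(r.execute_command('GRAPH.INFO'))
-
-# Restrict the reply to a single section.
-print(r.execute_command('GRAPH.INFO', 'RunningQueries'))
-```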
- -## Examples - -```sh -127.0.0.1:6379> GRAPH.INFO -1) "# Running queries" -2) (empty array) -3) "# Waiting queries" -4) (empty array) - -127.0.0.1:6379> GRAPH.INFO RunningQueries -1) "# Running queries" -2) (empty array) - -127.0.0.1:6379> GRAPH.INFO WaitingQueries -1) "# Waiting queries" -2) (empty array) -``` diff --git a/website/docs/commands/graph.list.mdx b/website/docs/commands/graph.list.mdx deleted file mode 100644 index d614595..0000000 --- a/website/docs/commands/graph.list.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: GRAPH.LIST -description: List all graph keys in the keyspace -sidebar_position: 5 -sidebar_label: GRAPH.LIST ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.LIST - -Lists all graph keys in the keyspace. - -## Examples - - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -db = FalkorDB(host='localhost', port=6379) -graphs = db.list_graphs() -print(graphs) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const db = await FalkorDB.connect({ - socket: { host: 'localhost', port: 6379 } -}); -const graphs = await db.list(); -console.log(graphs); -``` - - - - -```rust -use falkordb::{FalkorClientBuilder, FalkorConnectionInfo}; - -let connection_info: FalkorConnectionInfo = "falkor://127.0.0.1:6379" - .try_into() - .expect("Invalid connection info"); -let client = FalkorClientBuilder::new() - .with_connection_info(connection_info) - .build() - .expect("Failed to build client"); -let graphs = client.list_graphs(); -println!("{:?}", graphs); -``` - - - - -```java -import com.falkordb.*; - -Driver driver = FalkorDB.driver("localhost", 6379); -List graphs = driver.listGraphs(); -System.out.println(graphs); -``` - - - - -```bash -GRAPH.LIST -``` - - - - - -### Sample Output - -```sh -127.0.0.1:6379> GRAPH.LIST -2) G -3) resources -4) players -``` diff --git a/website/docs/commands/graph.memory.mdx b/website/docs/commands/graph.memory.mdx deleted file mode 100644 index 3d024d4..0000000 --- a/website/docs/commands/graph.memory.mdx +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: GRAPH.MEMORY -description: View detailed memory consumption statistics for a graph -sidebar_label: GRAPH.MEMORY ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.MEMORY -The `GRAPH.MEMORY` command returns detailed memory consumption statistics for a specific graph in **megabytes (MB)**. It provides insight into how much memory is used by various internal data structures such as nodes, edges, schemas, indices, and matrix representations. This command can be used to monitor memory consumption at the graph level, making it especially useful for debugging, monitoring, performance optimization, and capacity planning in FalkorDB deployments. - -## Syntax - -```bash -GRAPH.MEMORY USAGE [SAMPLES ] -``` - -Usage: `GRAPH.MEMORY USAGE [SAMPLES ]` - -## Arguments - -| Argument | Description | -|----------------|------------------------------------------------------------------------------------------------------------------------------------------| -| `` | The name of the graph to inspect (also referred to as ``). | -| `SAMPLES ` | *(Optional)* Number of samples to take when estimating memory usage. A higher number improves accuracy but increases computation time. The samples are averaged to estimate the total size. By default, this option is set to 100 if not specified. 
| - -## Return - -The command returns an array of key-value pairs, where each pair represents a specific memory metric and its value (in MB), corresponding to different components of the graph: - -| Metric Name / Field | Type | Description | -|-----------------------------------------------|---------|-------------------------------------------------------------------| -| `total_graph_sz_mb` | integer | Total memory consumed by the graph. | -| `label_matrices_sz_mb` | integer | Amount of memory used by label matrices (node labels tracking). | -| `relation_matrices_sz_mb` | integer | Amount of memory used by relationship type matrices (graph topology tracking). | -| `amortized_node_block_sz_mb` | integer | Memory used by nodes (amortized node storage). | -| `amortized_node_storage_sz_mb` | integer | Amount of memory used for nodes storage (alternative naming). | -| `amortized_node_attributes_by_label_sz_mb` | integer | Memory used by node attributes, split by node label. | -| `amortized_unlabeled_nodes_attributes_sz_mb` | integer | Memory used by node attributes with no label. | -| `amortized_edge_block_sz_mb` | integer | Memory used by edges (amortized edge storage). | -| `amortized_edge_storage_sz_mb` | integer | Amount of memory used for relationships storage (alternative naming). | -| `amortized_edge_attributes_by_type_sz_mb` | integer | Memory used by edge attributes, split by relationship type. | -| `indices_sz_mb` | integer | Amount of memory consumed by indices (if any). | - -*Note*: Metrics like `amortized_node_block_sz_mb` and `amortized_node_storage_sz_mb` are alternative names for the same data; both are included for clarity. - -## Examples - -### Basic Usage - - - - - - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const db = await FalkorDB.connect({ - socket: { host: 'localhost', port: 6379 } -}); -const graph = db.selectGraph('myGraph'); -const memoryInfo = await graph.memoryUsage(); -console.log(memoryInfo); -``` - - - - -```bash -GRAPH.MEMORY USAGE myGraph -``` - - - - - -### With Sampling - - - - - - - - - -```javascript -const memoryInfo = await graph.memoryUsage({ samples: 500 }); -console.log(memoryInfo); -``` - - - - -```bash -GRAPH.MEMORY USAGE myGraph SAMPLES 500 -``` - - - - - -### Sample Output - -```sh -127.0.0.1:6379> GRAPH.MEMORY USAGE flights - 1) "total_graph_sz_mb" - 2) (integer) 1086 - 3) "label_matrices_sz_mb" - 4) (integer) 96 - 5) "relation_matrices_sz_mb" - 6) (integer) 64 - 7) "amortized_node_storage_sz_mb" - 8) (integer) 120 - 9) "amortized_edge_storage_sz_mb" -10) (integer) 54 -11) "indices_sz_mb" -12) (integer) 752 -``` diff --git a/website/docs/commands/graph.profile.mdx b/website/docs/commands/graph.profile.mdx deleted file mode 100644 index f51407b..0000000 --- a/website/docs/commands/graph.profile.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: GRAPH.PROFILE -description: Execute queries and produce execution plans with metrics -sidebar_position: 6 -sidebar_label: GRAPH.PROFILE ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.PROFILE - -Executes a query and produces an execution plan augmented with metrics for each operation's execution. - -Arguments: `Graph name, Query` - -Returns: `String representation of a query execution plan, with details on results produced by and time spent in each operation.` - -`GRAPH.PROFILE` is a parallel entrypoint to `GRAPH.QUERY`. 
It accepts and executes the same queries, but it will not emit results, -instead returning the operation tree structure alongside the number of records produced and total runtime of each operation. - -It is important to note that this blends elements of [GRAPH.QUERY](/commands/graph.query) and [GRAPH.EXPLAIN](/commands/graph.explain). -It is not a dry run and will perform all graph modifications expected of the query, but will not output results produced by a `RETURN` clause or query statistics. - - - - - - - - - - - - - - - -```python -from falkordb import FalkorDB -client = FalkorDB() -graph = client.select_graph('imdb') -query = '''\ -MATCH (actor_a:Actor)-[:ACT]->(:Movie)<-[:ACT]-(actor_b:Actor) -WHERE actor_a <> actor_b -CREATE (actor_a)-[:COSTARRED_WITH]->(actor_b) -''' -result = graph.profile(query) -for line in result: - print(line) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const client = await FalkorDB.connect(); -const graph = client.selectGraph('imdb'); -const query = `\ -MATCH (actor_a:Actor)-[:ACT]->(:Movie)<-[:ACT]-(actor_b:Actor) -WHERE actor_a <> actor_b -CREATE (actor_a)-[:COSTARRED_WITH]->(actor_b) -`; -const result = await graph.profile(query); -result.forEach(line => console.log(line)); -``` - - - - -```rust -let client = FalkorDB::connect_default(); -let graph = client.select_graph("imdb"); -let query = r#" -MATCH (actor_a:Actor)-[:ACT]->(:Movie)<-[:ACT]-(actor_b:Actor) -WHERE actor_a <> actor_b -CREATE (actor_a)-[:COSTARRED_WITH]->(actor_b) -"#; -let result = graph.profile(query)?; -for line in result { - println!("{}", line); -} -``` - - - - -```java -FalkorDB client = new FalkorDB(); -Graph graph = client.selectGraph("imdb"); -String query = """ -MATCH (actor_a:Actor)-[:ACT]->(:Movie)<-[:ACT]-(actor_b:Actor) -WHERE actor_a <> actor_b -CREATE (actor_a)-[:COSTARRED_WITH]->(actor_b) -"""; -ResultSet result = graph.profile(query); -for (String line : result) { - System.out.println(line); -} -``` - - - - -```bash -GRAPH.PROFILE imdb \ -"MATCH (actor_a:Actor)-[:ACT]->(:Movie)<-[:ACT]-(actor_b:Actor) -WHERE actor_a <> actor_b -CREATE (actor_a)-[:COSTARRED_WITH]->(actor_b)" -1) "Create | Records produced: 11208, Execution time: 168.208661 ms" -2) " Filter | Records produced: 11208, Execution time: 1.250565 ms" -3) " Conditional Traverse | Records produced: 12506, Execution time: 7.705860 ms" -4) " Node By Label Scan | (actor_a:Actor) | Records produced: 1317, Execution time: 0.104346 ms" -``` - - - diff --git a/website/docs/commands/graph.query.mdx b/website/docs/commands/graph.query.mdx deleted file mode 100644 index 3a2433e..0000000 --- a/website/docs/commands/graph.query.mdx +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: GRAPH.QUERY -description: Execute queries against a graph -sidebar_position: 1 -sidebar_label: GRAPH.QUERY ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.QUERY - -Executes the given query against a specified graph. - -Arguments: `Graph name, Query, Timeout [optional]` - -Returns: [Result set](/design/result-structure) - -### Queries and Parameterized Queries - -The execution plans of queries, both regular and parameterized, are cached (up to [CACHE_SIZE](/getting-started/configuration#cache_size) unique queries are cached). Therefore, it is recommended to use parameterized queries when executing many queries with the same pattern but different constants. - -Query-level timeouts can be set as described in [the configuration section](/getting-started/configuration#timeout). 
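-
-To benefit from the execution-plan cache described above, keep the query text constant and vary only the parameters. A minimal Python sketch (the graph is assumed to be selected already; labels and property names are illustrative):
-
-```python
-query = "MATCH (p:president)-[:born]->(:state {name: $state_name}) RETURN p"
-
-# The query string never changes, so after the first run the cached
-# execution plan is reused; only the bound parameter value differs.
-for state in ['Hawaii', 'Texas', 'Virginia']:
-    result = graph.query(query, {'state_name': state})
-    print(result)
-```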
- -#### Command structure - -`GRAPH.QUERY graph_name "query"` - -example: - - - - - - - - - - - - - - - -```python -graph.query("MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p") -``` - - - - -```javascript -const result = await graph.query("MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"); -console.log(result); -``` - - - - -```rust -let result = graph.query(r#"MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"#).execute().await?; -println!("{:?}", result); -``` - - - - -```java -ResultSet result = graph.query("MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"); -System.out.println(result); -``` - - - - -```bash -GRAPH.QUERY us_government "MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p" -``` - - - - - - -#### Parametrized query structure: - -`GRAPH.QUERY graph_name "CYPHER param=val [param=val ...] query"` - -example: - - - - - - - - - - - - - - - -```python -graph.query("MATCH (p:president)-[:born]->(:state {name:$state_name}) RETURN p", {'state_name': 'Hawaii'}) -``` - - - - -```javascript -const result = await graph.query( - "MATCH (p:president)-[:born]->(:state {name:$state_name}) RETURN p", - { params: { state_name: "Hawaii" } } -); -console.log(result); -``` - - - - -```rust -let params = std::collections::HashMap::from([ - ("state_name", "Hawaii") -]); -let result = graph.query_with_params( - r#"MATCH (p:president)-[:born]->(:state {name:$state_name}) RETURN p"#, - ¶ms -).execute().await?; -println!("{:?}", result); -``` - - - - -```java -Map params = new HashMap<>(); -params.put("state_name", "Hawaii"); -ResultSet result = graph.query( - "MATCH (p:president)-[:born]->(:state {name:$state_name}) RETURN p", - params -); -System.out.println(result); -``` - - - - -```bash -GRAPH.QUERY us_government "CYPHER state_name='Hawaii' MATCH (p:president)-[:born]->(:state {name:$state_name}) RETURN p" -``` - - - - - -### Query language - -The syntax is based on [Cypher](http://www.opencypher.org/). [Most](/cypher/cypher-support) of the language is supported. See [Cypher documentation](/cypher). diff --git a/website/docs/commands/graph.ro-query.mdx b/website/docs/commands/graph.ro-query.mdx deleted file mode 100644 index 824d568..0000000 --- a/website/docs/commands/graph.ro-query.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: GRAPH.RO_QUERY -description: Execute read-only queries against a graph -sidebar_position: 2 -sidebar_label: GRAPH.RO_QUERY ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.RO_QUERY - -Executes a given read only query against a specified graph. - -Arguments: `Graph name, Query, Timeout [optional]` - -Returns: [Result set](/design/result-structure) for a read only query or an error if a write query was given. 
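-
-A query containing a write clause such as `CREATE` is rejected. A minimal Python sketch of the failure case (the graph is assumed to be selected already, as in the examples below):
-
-```python
-try:
-    # CREATE is a write operation, so the read-only endpoint must reject it.
-    graph.ro_query("CREATE (:president {name: 'Test'})")
-except Exception as e:
-    print(e)
-```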
- - - - - - - - - - - - - - - -```python -graph.ro_query("MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p") -``` - - - - -```javascript -const result = await graph.ro_query("MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"); -console.log(result); -``` - - - - -```rust -let result = graph.ro_query(r#"MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"#).execute().await?; -println!("{:?}", result); -``` - - - - -```java -ResultSet result = graph.readOnlyQuery("MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p"); -System.out.println(result); -``` - - - - -```bash -GRAPH.RO_QUERY us_government "MATCH (p:president)-[:born]->(:state {name:'Hawaii'}) RETURN p" -``` - - - - - -Query-level timeouts can be set as described in [the configuration section](/getting-started/configuration#timeout). diff --git a/website/docs/commands/graph.slowlog.mdx b/website/docs/commands/graph.slowlog.mdx deleted file mode 100644 index 810ebe1..0000000 --- a/website/docs/commands/graph.slowlog.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: GRAPH.SLOWLOG -description: View slowest queries issued against a graph -sidebar_label: GRAPH.SLOWLOG ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# GRAPH.SLOWLOG - -Returns a list containing up to 10 of the slowest queries issued against the given graph ID. - -Each item in the list has the following structure: - -1. A Unix timestamp at which the log entry was processed. -2. The issued command. -3. The issued query. -4. The amount of time needed for its execution, in milliseconds. - -## Examples - -### Get slowlog - - - - - - - - - - - -```python -from falkordb import FalkorDB -db = FalkorDB(host='localhost', port=6379) -graph = db.select_graph('graph_id') -slowlog = graph.slowlog() -print(slowlog) -``` - - - - -```javascript -import { FalkorDB } from 'falkordb'; -const db = await FalkorDB.connect({ - socket: { host: 'localhost', port: 6379 } -}); -const graph = db.selectGraph('graph_id'); -const slowlog = await graph.slowLog(); -console.log(slowlog); -``` - - - - -```bash -GRAPH.SLOWLOG graph_id -``` - - - - - -### Sample Output - -```sh -GRAPH.SLOWLOG graph_id - 1) 1) "1581932396" - 2) "GRAPH.QUERY" - 3) "MATCH (a:Person)-[:FRIEND]->(e) RETURN e.name" - 4) "0.831" - 2) 1) "1581932396" - 2) "GRAPH.QUERY" - 3) "MATCH (me:Person)-[:FRIEND]->(:Person)-[:FRIEND]->(fof:Person) RETURN fof.name" - 4) "0.288" -``` - -### Reset slowlog - - - - - - - - - -```python -graph.slowlog_reset() -``` - - - - -```bash -GRAPH.SLOWLOG graph_id RESET -``` - - - - - -Once cleared the information is lost forever. diff --git a/website/docs/commands/index.md b/website/docs/commands/index.md deleted file mode 100644 index 405c179..0000000 --- a/website/docs/commands/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Commands -description: Commands overview -sidebar_position: 3 -sidebar_label: Commands ---- - -# Commands - -## FalkorDB Features - -FalkorDB exposes graph database functionality within Redis using the [openCypher](https://opencypher.org/) query language. Its basic commands accept openCypher queries, while additional commands are exposed for configuration or metadata retrieval. - -## FalkorDB API - -Command details can be retrieved by filtering for the [module](/commands/?group=graph) or for a specific command, e.g., `GRAPH.QUERY`. -The details include the syntax for the commands, where: - -* Optional arguments are enclosed in square brackets, for example `[timeout]`. 
-* Additional optional arguments are indicated by an ellipsis: `...` - -Most commands require a graph key name as their first argument. diff --git a/website/docs/cypher/call.md b/website/docs/cypher/call.md deleted file mode 100644 index f466de5..0000000 --- a/website/docs/cypher/call.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: CALL -description: > -sidebar_position: 16 -sidebar_label: CALL ---- - - - -# CALL -The CALL {} (subquery) clause allows local execution of subqueries, which opens the door for many comfortable and efficient actions on a graph. - -The subquery is executed once for each record in the input stream. - -The subquery may be a returning or non-returning subquery. A returning subquery may change the amount of records, while a non-returning subquery will not. - -The variables in the scope before the CALL {} clause are available after the clause, together with the variables returned by the subquery (in the case of a returning subquery). - -Variables may be imported from the outer scope **only** in an opening `WITH` clause, via simple projections (e.g. `WITH n, m`), or via `WITH *` (which imports all bound variables). The variables returned from a subquery may not override existing variables in the outer scope. - -The CALL {} clause may be used for numerous purposes, such as: Post-`UNION` processing, local environment for aggregations and actions on every input row, efficient operations using a limited namespace (via imports) and performing side-effects using non-returning subqueries. Let's see some examples. - -* Post-`UNION` processing. - -We can easily get the cheapest and most expensive items in a store and set their `of_interest` property to `true` (to keep monitoring the 'interesting' items) using post-`UNION` processing: - - ```sh - GRAPH.QUERY DEMO_GRAPH - CALL { - MATCH (s:Store {name: 'Walmart'})-[:SELLS]->(i:Item) - RETURN i AS item - ORDER BY price ASC - LIMIT 1 - UNION - MATCH (s:Store {name: 'Walmart'})-[:SELLS]->(i:Item) - RETURN i AS item - ORDER BY price DESC - LIMIT 1 - } - SET item.of_interest = true - RETURN item.name AS name, item.price AS price - ``` - -We can utilize post-`UNION` processing to perform aggregations over differently-matched entities. For example, we can count the number of customers and vendors that a store interacts with: - - ```sh - GRAPH.QUERY DEMO_GRAPH - CALL { - MATCH (s:Store {name: 'Walmart'})-[:SELLS_TO]->(c:Customer) - RETURN c AS interface - UNION - MATCH (s:Store {name: 'Walmart'})-[:BUYS_FROM]->(v:Vendor) - RETURN v AS interface - } - RETURN count(interface) AS interfaces - ``` - -* Local environment for aggregations and actions on every input row. - -Another key feature of the CALL {} clause is the ability to perform isolated aggregations on every input row. For example, let's check if there is any correlation between the amount of sales per-product and the advertisement-intensity implemented for it in a particular month. - - ```sh - GRAPH.QUERY DEMO_GRAPH - MATCH (item:Item) - CALL { - WITH item - MATCH (item)-[s:SOLD_TO {advertisement_intensity: 10}]->(c:Customer) - WHERE s.date > '01-01-2023' AND s.date < '01-02-2023' - RETURN count(s) AS item_sales_ads_high - } - CALL { - WITH item - MATCH (item)-[s:SOLD_TO {advertisement_intensity: 5}]->(c:Customer) - WHERE s.date > '01-01-2023' AND s.date < '01-02-2023' - RETURN count(s) AS item_sales_ads_low - } - RETURN item.name AS name, item_sales_ads_high as high_ads_sales, item_sales_ads_low as low_ads_sales - ``` - - - - - -* Side-effects. 
- -We can comfortably perform side-effects using non-returning subqueries. For example, we can mark a sub-group of nodes in the graph withholding some shared property. Let's mark all the items in a Walmart store that were sold more than 100 times as popular items, and return **all** items in the store: - - ```sh - GRAPH.QUERY DEMO_GRAPH - MATCH (item:Item) - CALL { - WITH item - MATCH (item)-[s:SOLD_TO]->(c:Customer) - WITH item, count(s) AS item_sales - WHERE item_sales > 100 - SET item.popular = true - } - RETURN item - ``` \ No newline at end of file diff --git a/website/docs/cypher/create.md b/website/docs/cypher/create.md deleted file mode 100644 index 5850d79..0000000 --- a/website/docs/cypher/create.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: CREATE -description: > -sidebar_position: 8 -sidebar_label: CREATE ---- - - - -# CREATE -The `CREATE` clause is used to introduce new nodes and relationships into the graph. - -## Creating Nodes - -The simplest example creates a single node without any labels or properties: - -```sh -CREATE (n) -``` - -You can create multiple entities by separating them with commas: - -```sh -CREATE (n),(m) -``` - -Create a node with a label and properties: - -```sh -CREATE (:Person {name: 'Kurt', age: 27}) -``` - -## Creating Relationships - -To add relationships between nodes, you typically match existing nodes first, then create the relationship. In this example, we find an existing source node and create a new relationship with a new destination node: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (a:Person) -WHERE a.name = 'Kurt' -CREATE (a)-[:MEMBER]->(:Band {name:'Nirvana'})" -``` - -Here the source node `(a:Person)` is matched (bound), while the destination node `(:Band)` is unbound and will be created. - -This query creates a new node representing the band Nirvana and a new `MEMBER` relationship connecting Kurt to the band. - -## Creating Complete Patterns - -You can create entire graph patterns in a single statement. All entities within the pattern that are not bound (matched) will be created: - -```sh -GRAPH.QUERY DEMO_GRAPH -"CREATE (jim:Person{name:'Jim', age:29})-[:FRIENDS]->(pam:Person {name:'Pam', age:27})-[:WORKS]->(:Employer {name:'Dunder Mifflin'})" -``` - -This query creates three nodes (Jim, Pam, and an Employer) and two relationships (FRIENDS and WORKS), establishing a complete graph pattern in one operation. diff --git a/website/docs/cypher/cypher-support.md b/website/docs/cypher/cypher-support.md deleted file mode 100644 index e000614..0000000 --- a/website/docs/cypher/cypher-support.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Cypher coverage -description: > -sidebar_position: 22 -sidebar_label: Cypher coverage ---- - - - -# Cypher coverage -This document is based on the Cypher Query Language Reference (version 9), available at [OpenCypher Resources](https://www.opencypher.org/resources). - -## Patterns - -Patterns are fully supported. - -## Types - -### Structural types - -+ Nodes -+ Relationships -+ Path variables (alternating sequence of nodes and relationships). - - -### Composite types - -+ Lists -+ Maps -+ Temporal types (Date, DateTime, LocalDateTime, Time, LocalTime, Duration) - -### Literal types - -+ Numeric types (64-bit doubles and 64-bit signed integer representations) -+ String literals -+ Booleans - - **Unsupported:** - -- Hexadecimal and octal numerics - -### Other - -NULL is supported as a representation of a missing or undefined value. 
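-
-For example, a property that was never set evaluates to NULL, and `coalesce` can supply a fallback (illustrative property names):
-
-```sh
-GRAPH.QUERY DEMO_GRAPH
-"MATCH (p:Person) RETURN p.name, coalesce(p.nickname, 'unknown')"
-```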
- -## Comparability, equality, orderability, and equivalence - -This is a somewhat nebulous area in Cypher itself, with a lot of edge cases. -Broadly speaking, FalkorDB behaves as expected with string and numeric values. -There are likely some behaviors involving the numerics NaN, -inf, inf, and possibly -0.0 that deviate from the Cypher standard. -We do not support any of these properties at the type level, meaning nodes and relationships are not internally comparable. - -## Clauses - -### Reading Clauses - -+ MATCH -+ OPTIONAL MATCH - - **Unsupported:** - -- Label expressions - -### Projecting Clauses - -+ RETURN -+ AS -+ WITH -+ UNWIND - -### Reading sub-clauses - -+ WHERE -+ ORDER BY -+ SKIP -+ LIMIT - -### Writing Clauses - -+ CREATE -+ DELETE - + We actually implement DETACH DELETE, the distinction being that relationships invalidated by node deletions are automatically deleted. -+ SET - - **Unsupported:** - -- REMOVE (to modify properties) - + Properties can be deleted with SET [prop] = NULL. - -### Reading/Writing Clauses - -+ MERGE -+ CALL (procedures) - - The currently-supported procedures are listed in [the Procedures documentation](/commands/graph.query#procedures). - -### Set Operations - -+ UNION -+ UNION ALL - -## Functions - -The currently-supported functions are listed in [the Functions documentation](/commands/graph.query#functions). - - **Unsupported:** - -- Temporal arithmetic functions -- User-defined functions - -## Operators - -### Mathematical operators - -The currently-supported functions are listed in [the mathematical operators documentation](/commands/graph.query#mathematical-operators). - -### String operators - -+ String operators (STARTS WITH, ENDS WITH, CONTAINS) are supported. - - **Unsupported:** - -- Regex operator - -### Boolean operators - -+ AND -+ OR -+ NOT -+ XOR - -## Parameters - -Parameters may be specified to allow for more flexible query construction: - -```sh -CYPHER name_param = "NiccolΓ² Machiavelli" birth_year_param = 1469 MATCH (p:Person {name: $name_param, birth_year: $birth_year_param}) RETURN p -``` - -The example above shows the syntax used by `redis-cli` to set parameters, but -each FalkorDB client introduces a language-appropriate method for setting parameters, -and is described in their documentation. - -## Non-Cypher queries - -+ FalkorDB provides the `GRAPH.EXPLAIN` command to print the execution plan of a provided query. -+ `GRAPH.DELETE` will remove a graph and all Redis keys associated with it. -- We do not currently provide support for queries that retrieve schemas, though the LABELS and TYPE scalar functions may be used to get a graph overview. diff --git a/website/docs/cypher/delete.md b/website/docs/cypher/delete.md deleted file mode 100644 index 6570e42..0000000 --- a/website/docs/cypher/delete.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: DELETE -description: > -sidebar_position: 9 -sidebar_label: DELETE ---- - - - -# DELETE -The `DELETE` clause is used to remove nodes and relationships from the graph. - -## Important Behavior - -**⚠️ Note:** Deleting a node automatically deletes all of its incoming and outgoing relationships. You cannot have orphaned relationships in the graph. 
- -## Deleting Nodes - -To delete a node and all of its relationships: - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (p:Person {name:'Jim'}) DELETE p" -``` - -## Deleting Relationships - -To delete specific relationships: - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (:Person {name:'Jim'})-[r:FRIENDS]->() DELETE r" -``` - -This query deletes all outgoing `FRIENDS` relationships from the node with name 'Jim', while keeping the nodes intact. - -## Common Patterns - -### Delete all nodes and relationships in a graph - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (n) DETACH DELETE n" -``` - -The `DETACH DELETE` automatically removes all relationships before deleting the node. - -### Conditional deletion - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (p:Person) WHERE p.age < 18 DELETE p" -``` - -Deletes all Person nodes where age is less than 18. \ No newline at end of file diff --git a/website/docs/cypher/foreach.md b/website/docs/cypher/foreach.md deleted file mode 100644 index 92d1e17..0000000 --- a/website/docs/cypher/foreach.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: FOREACH -description: > -sidebar_position: 15 -sidebar_label: FOREACH ---- - - - -# FOREACH -The `FOREACH` clause feeds the components of a list to a sub-query comprised of **updating clauses only** (`CREATE`, `MERGE`, `SET`, `REMOVE`, `DELETE` and `FOREACH`), while passing on the records it receives without change. - -The clauses within the sub-query recognize the bound variables defined prior to the `FOREACH` clause, but are local in the sense that later clauses are not aware of the variables defined inside them. In other words, `FOREACH` uses the current context, and does not affect it. - -The `FOREACH` clause can be used for numerous purposes, such as: Updating and creating graph entities in a concise manner, marking nodes\edges that satisfy some condition or are part of a path of interest and performing conditional queries. - -We show examples of queries performing the above 3 use-cases. - -The following query will create 5 nodes, each with property `v` with the values from 0 to 4 corresponding to the appropriate index in the list. - -```sh -GRAPH.QUERY DEMO_GRAPH -"FOREACH(i in [1, 2, 3, 4] | CREATE (n:N {v: i}))" -``` - -The following query marks the nodes of all paths of length up to 15 km from a hotel in Toronto to a steakhouse with at least 2 Michelin stars. - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH p = (hotel:HOTEL {City: 'Toronto'})-[r:ROAD*..5]->(rest:RESTAURANT {type: 'Steakhouse'}) WHERE sum(r.length) <= 15 AND hotel.stars >= 4 AND rest.Michelin_stars >= 2 -FOREACH(n in nodes(p) | SET n.part_of_path = true)" -``` - -The following query searches for all the hotels, checks whether they buy directly from a bakery, and if not - makes sure they are marked as buying from a supplier that supplies bread, and that they do not buy directly from a bakery. - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (h:HOTEL) OPTIONAL MATCH (h)-[b:BUYS_FROM]->(bakery:BAKERY) -FOREACH(do_perform IN CASE WHEN b = NULL THEN [1] ELSE [] END | MERGE (h)-[b2:BUYS_FROM]->(s:SUPPLIER {supplies_bread: true}) SET b2.direct = false)" -``` \ No newline at end of file diff --git a/website/docs/cypher/functions.md b/website/docs/cypher/functions.md deleted file mode 100644 index af29f88..0000000 --- a/website/docs/cypher/functions.md +++ /dev/null @@ -1,447 +0,0 @@ ---- -title: Functions -description: > -sidebar_position: 18 -sidebar_label: Functions ---- - - - -# Functions -This section contains information on all supported functions from the Cypher query language. 
- -* [Predicate functions](#predicate-functions) -* [Scalar functions](#scalar-functions) -* [Aggregating functions](#aggregating-functions) -* [List functions](#list-functions) -* [Mathematical operators](#mathematical-operators) -* [Mathematical functions](#mathematical-functions) -* [Trigonometric functions](#trigonometric-functions) -* [String functions](#string-functions) -* [Point functions](#point-functions) -* [Type conversion functions](#type-conversion-functions) -* [Node functions](#node-functions) -* [Path functions](#path-functions) -* [Vector functions](#vector-functions) - -## Predicate functions - -| Function | Description| -| --------------------------------------------------------------------------------- | :----------| -| [all(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds true for all elements in _list_ | -| [any(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds true for at least one element in _list_ | -| exists(_pattern_) | Returns true when at least one match for _pattern_ exists | -| isEmpty(_list_|_map_|_string_) | Returns true if the input list or map contains no elements or if the input string contains no characters `
` Returns null when the input evaluates to null | -| [none(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds false for all elements in _list_ | -| [single(_var_ IN _list_ WHERE _predicate_)](#existential-comprehension-functions) | Returns true when _predicate_ holds true for exactly one element in _list_ | - -## Scalar functions - -| Function | Description| -| --------------------------------- | :----------| -| coalesce(_expr_[, expr...]) | Returns the evaluation of the first argument that evaluates to a non-null value `
` Returns null when all arguments evaluate to null | -| endNode(_relationship_) | Returns the destination node of a relationship `
` Returns null when _relationship_ evaluates to null | -| hasLabels(_node_, _labelsList_) * | Returns true when _node_ contains all labels in _labelsList_, otherwise false `
` Return true when _labelsList_ evaluates to an empty list | -| id(_node_|_relationship_) | Returns the internal ID of a node or relationship (which is not immutable) | -| labels(_node_) | Returns a list of strings: all labels of _node_ `
` Returns null when _node_ evaluates to null | -| properties(_expr_) | When _expr_ is a node or relationship: Returns a map containing all the properties of the given node or relationship `
` When _expr_ evaluates to a map: Returns _expr_ unchanged `
` Returns null when _expr_ evaluates to null | -| randomUUID() | Returns a random UUID (Universal Unique IDentifier) | -| startNode(_relationship_) | Returns the source node of a relationship `
` Returns null when _relationship_ evaluates to null | -| timestamp() | Returns the current system timestamp (milliseconds since epoch) | -| type(_relationship_) | Returns a string: the type of _relationship_ `
` Returns null when _relationship_ evaluates to null | -| typeOf(_expr_) * | Returns a string: the type of a literal, an expression's evaluation, an alias, a node's property, or a relationship's property `
` Return value is one of `Map`, `String`, `Integer`, `Boolean`, `Float`, `Node`, `Edge`, `List`, `Path`, `Point`, or `Null` | -| prev(_expr_) * | Stores the previous value and returns it on the next call; returns `null` on the first call. Useful for variable-length traversal filtering of edges based on the prior value. | - -* FalkorDB-specific extensions to Cypher - -## Aggregating functions - -|Function | Description| -| ----------------------------------- |:-----------| -|avg(_expr_) | Returns the average of a set of numeric values. null values are ignored `
` Returns null when _expr_ has no evaluations | -|collect(_expr_) | Returns a list containing all non-null elements which evaluated from a given expression | -|count(_expr_|*) | When argument is _expr_: returns the number of non-null evaluations of _expr_ `
` When argument is `*`: returns the total number of evaluations (including nulls) | -|max(_expr_) | Returns the maximum value in a set of values (taking into account type ordering). null values are ignored `
` Returns null when _expr_ has no evaluations | -|min(_expr_) | Returns the minimum value in a set of values (taking into account type ordering). null values are ignored `
` Returns null when _expr_ has no evaluations | -|percentileCont(_expr_, _percentile_) | Returns a linear-interpolated percentile (between 0.0 and 1.0) over a set of numeric values. null values are ignored `
` Returns null when _expr_ has no evaluations | -|percentileDisc(_expr_, _percentile_) | Returns a nearest-value percentile (between 0.0 and 1.0) over a set of numeric values. null values are ignored `
` Returns null when _expr_ has no evaluations | -|stDev(_expr_) | Returns the sample standard deviation over a set of numeric values. null values are ignored `
` Returns null when _expr_ has no evaluations | -|stDevP(_expr_) | Returns the population standard deviation over a set of numeric values. null values are ignored `
` Returns null when _expr_ has no evaluations | -|sum(_expr_) | Returns the sum of a set of numeric values. null values are ignored `
` Returns 0 when _expr_ has no evaluations | - -## List functions - -| Function | Description| -| ------------------------------------ | :----------| -| head(_expr_) | Returns the first element of a list `
` Returns null when _expr_ evaluates to null or an empty list | -| keys(_expr_) | Returns a list of strings: all key names for given map or all property names for a given node or edge `
` Returns null when _expr_ evaluates to null | -| last(_expr_) | Returns the last element of a list `
` Returns null when _expr_ evaluates to null or an empty list -| list.dedup(_list_) * | Given a list, returns a similar list after removing duplicate elements `
` Order is preserved, duplicates are removed from the end of the list `
` Returns null when _list_ evaluates to null `
` Emit an error when _list_ does not evaluate to a list or to null | -| list.insert(_list_, _idx_, _val_[, _dups_ = TRUE]) * | Given a list, returns a list after inserting a given value at a given index `
` _idx_ is 0-based when non-negative, or from the end of the list when negative `
` Returns null when _list_ evaluates to null `
` Returns _list_ when _val_ evaluates to null `
` Returns _list_ when _idx_ evaluates to an integer not in [-NumItems-1 .. NumItems] `
` When _dups_ evaluates to FALSE: returns _list_ when _val_ evaluates to a value that is already an element of _list_ `
` Emit an error when _list_ does not evaluate to a list or to null `
` Emit an error when _idx_ does not evaluate to an integer `
` Emit an error when _dups_, if specified, does not evaluate to a Boolean | -| list.insertListElements(_list_, _list2_, _idx_[, _dups_ = TRUE]) * | Given a list, returns a list after inserting the elements of a second list at a given index `
` _idx_ is 0-based when non-negative, or from the end of the list when negative `
` Returns null when _list_ evaluates to null `
` Returns _list_ when _list2_ evaluates to null `
` Returns _list_ when _idx_ evaluates to an integer not in [-NumItems-1 .. NumItems] `
` When _dups_ evaluates to FALSE: If an element of _list2_ evaluates to an element of _list_ it would be skipped; If multiple elements of _list2_ evaluate to the same value - this value would be inserted at most once to _list_ `
` Emit an error when _list_ does not evaluate to a list or to null `
` Emit an error when _list2_ does not evaluate to a list or to null `
` Emit an error when _idx_ does not evaluate to an integer `
` Emit an error when _dups_, if specified, does not evaluate to a Boolean | -| list.remove(_list_, _idx_[, _count_ = 1]) * | Given a list, returns a list after removing a given number of consecutive elements (or less, if the end of the list has been reached). starting at a given index. `
` _idx_ is 0-based when non-negative, or from the end of the list when negative `
` Returns _null_ when _list_ evaluates to null `
` Returns _list_ when _idx_ evaluates to an integer not in [-NumItems .. NumItems-1] `
` Returns _list_ when _count_ evaluates to a non-positive integer `
` Emit an error when _list_ does not evaluate to a list or to null `
` Emit an error when _idx_ does not evaluate to an integer `
` Emit an error when _count_, if specified, does not evaluate to an integer | -| list.sort(_list_[, _ascending_ = TRUE]) * | Given a list, returns a list with similar elements, but sorted (inversely-sorted if _ascending_ is evaluated to FALSE) `
` Returns null when _list_ evaluates to null `
` Emit an error when _list_ does not evaluate to a list or to null `
` Emit an error when _ascending_, if specified, does not evaluate to a Boolean | -| range(_first_, _last_[, _step_ = 1]) | Returns a list of integers in the range of [start, end]. _step_, an optional integer argument, is the increment between consecutive elements | -| size(_expr_) | Returns the number of elements in a list `
` Returns null with _expr_ evaluates to null | -| tail(_expr_) | Returns a sublist of a list, which contains all its elements except the first `
` Returns an empty list when _expr_ contains less than 2 elements. `
` Returns null when _expr_ evaluates to null | -| [reduce(...)](#reduce) | Returns a scalar produced by evaluating an expression against each list member | - -* FalkorDB-specific extensions to Cypher - -## Mathematical operators - -|Function | Description| -| ----------- |:-----------| -| + | Add two values | -| - | Subtract second value from first | -| * | Multiply two values | -| / | Divide first value by the second | -| ^ | Raise the first value to the power of the second | -| % | Perform modulo division of the first value by the second | - -## Mathematical functions - -|Function | Description| -| ------------------------- |:-----------| -| abs(_expr_) | Returns the absolute value of a numeric value `
` Returns null when _expr_ evaluates to null | -| ceil(_expr_) ** | When _expr_ evaluates to an integer: returns its evaluation `
` When _expr_ evaluates to floating point: returns a floating point equals to the smallest integer greater than or equal to _expr_ `
` Returns null when _expr_ evaluates to null | -| e() | Returns the constant _e_, the base of the natural logarithm | -| exp(_expr_) | Returns _e_^_expr_, where _e_ is the base of the natural logarithm `
` Returns null when _expr_ evaluates to null | -| floor(_expr_) ** | When _expr_ evaluates to an integer: returns its evaluation `
` When _expr_ evaluates to a floating point: returns a floating point equals to the greatest integer less than or equal to _expr_ `
` Returns null when _expr_ evaluates to null | -| log(_expr_) | Returns the natural logarithm of a numeric value `
` Returns nan when _expr_ evaluates to a negative numeric value, -inf when _expr_ evaluates to 0, and null when _expr_ evaluates to null | -| log10(_expr_) | Returns the base-10 logarithm of a numeric value `
` Returns nan when _expr_ evaluates to a negative numeric value, -inf when _expr_ evaluates to 0, and null when _expr_ evaluates to null | -| pow(_base_, _exponent_) * | Returns _base_ raised to the power of _exponent_ (equivalent to _base_^_exponent_) `
` Returns null when either evaluates to null | -| rand() | Returns a random floating point in the range [0,1] | -| round(_expr_) ** *** | When _expr_ evaluates to an integer: returns its evaluation `
` When _expr_ evaluates to a floating point: returns a floating point equals to the integer closest to _expr_ `
` Returns null when _expr_ evaluates to null | -| sign(_expr_) | Returns the signum of a numeric value: 0 when _expr_ evaluates to 0, -1 when _expr_ evaluates to a negative numeric value, and 1 when _expr_ evaluates to a positive numeric value `
` Returns null when _expr_ evaluates to null | -| sqrt(_expr_) | Returns the square root of a numeric value `
` Returns nan when _expr_ evaluates to a negative value and null when _expr_ evaluates to null | - -* FalkorDB-specific extensions to Cypher - -** FalkorDB-specific behavior: to avoid possible loss of precision, when _expr_ evaluates to an integer - the result is an integer as well - -*** FalkorDB-specific behavior: tie-breaking method is "half away from zero" - -## Trigonometric functions - -|Function | Description| -| --------------------- |:-----------| -| acos(_expr_) | Returns the arccosine, in radians, of a numeric value `
` Returns nan when _expr_ evaluates to a numeric value not in [-1, 1] and null when _expr_ evaluates to null | -| asin(_expr_) | Returns the arcsine, in radians, of a numeric value `
` Returns nan when _expr_ evaluates to a numeric value not in [-1, 1] and null when _expr_ evaluates to null | -| atan(_expr_) | Returns the arctangent, in radians, of a numeric value `
` Returns null when _expr_ evaluates to null | -| atan2(_expr_, _expr_) | Returns the 2-argument arctangent, in radians, of a pair of numeric values (Cartesian coordinates) `
` Returns 0 when both expressions evaluate to 0 `
` Returns null when either expression evaluates to null | -| cos(_expr_) | Returns the cosine of a numeric value that represents an angle in radians `
` Returns null when _expr_ evaluates to null | -| cot(_expr_) | Returns the cotangent of a numeric value that represents an angle in radians `
` Returns inf when _expr_ evaluates to 0 and null when _expr_ evaluates to null | -| degrees(_expr_) | Converts a numeric value from radians to degrees `
` Returns null when _expr_ evaluates to null | -| haversin(_expr_) | Returns half the versine of a numeric value that represents an angle in radians `
` Returns null when _expr_ evaluates to null | -| pi() | Returns the mathematical constant _pi_ | -| radians(_expr_) | Converts a numeric value from degrees to radians `
` Returns null when _expr_ evaluates to null | -| sin(_expr_) | Returns the sine of a numeric value that represents an angle in radians `
` Returns null when _expr_ evaluates to null | -| tan(_expr_) | Returns the tangent of a numeric value that represents an angle in radians `
` Returns null when _expr_ evaluates to null | - -## String functions - -| Function | Description| -| ----------------------------------- | :----------| -| left(_str_, _len_) | Returns a string containing the _len_ leftmost characters of _str_ `
` Returns null when _str_ evaluates to null, otherwise emits an error if _len_ evaluates to null | -| lTrim(_str_) | Returns _str_ with leading whitespace removed `
` Returns null when _str_ evaluates to null | -| replace(_str_, _search_, _replace_) | Returns _str_ with all occurrences of _search_ replaced with _replace_ `
` Returns null when any argument evaluates to null | -| reverse(_str_) | Returns a string in which the order of all characters in _str_ is reversed `
` Returns null when _str_ evaluates to null | -| right(_str_, _len_) | Returns a string containing the _len_ rightmost characters of _str_ `
` Returns null when _str_ evaluates to null, otherwise emits an error if _len_ evaluates to null | -| rTrim(_str_) | Returns _str_ with trailing whitespace removed `
` Returns null when _str_ evaluates to null | -| split(_str_, _delimiter_) | Returns a list of strings from splitting _str_ by _delimiter_ `
` Returns null when any argument evaluates to null | -| string.join(_strList_[, _delimiter_ = '']) * | Returns a concatenation of a list of strings using a given delimiter `
` Returns null when _strList_ evaluates to null `
` Returns null when _delimiter_, if specified, evaluates to null `
` Emit an error when _strList_ does not evaluate to a list or to null `
` Emit an error when an element of _strList_ does not evaluate to a string `
` Emit an error when _delimiter_, if specified, does not evaluate to a string or to null | -| string.matchRegEx(_str_, _regex_) * | Given a string and a regular expression, returns a list of all matches and matching regions `
` Returns an empty list when _str_ evaluates to null `
` Returns an empty list when _regex_ evaluates to null `
` Emit an error when _str_ does not evaluate to a string or to null `
` Emit an error when _regex_ does not evaluate to a valid regex string or to null | -| string.replaceRegEx(_str_, _regex_, _replacement_) * | Given a string and a regular expression, returns a string after replacing each regex match with a given replacement `
` Returns null when _str_ evaluates to null `
` Returns null when _regex_ evaluates to null `
` Returns null when _replacement_ evaluates to null `
` Emit an error when _str_ does not evaluate to a string or to null `
` Emit an error when _regex_ does not evaluate to a valid regex string or to null `
` Emit an error when _replacement_ does not evaluate to a string or to null | -| substring(_str_, _start_[, _len_]) | When _len_ is specified: returns a substring of _str_ beginning with a 0-based index _start_ and with length _len_ `
` When _len_ is not specified: returns a substring of _str_ beginning with a 0-based index _start_ and extending to the end of _str_ `
` Returns null when _str_ evaluates to null `
` Emit an error when _start_ or _len_ evaluate to null | -| toLower(_str_) | Returns _str_ in lowercase `
` Returns null when _str_ evaluates to null | -| toJSON(_expr_) * | Returns a [JSON representation](#json-format) of a value `
` Returns null when _expr_ evaluates to null | -| toUpper(_str_) | Returns _str_ in uppercase `
` Returns null when _str_ evaluates to null | -| trim(_str_) | Returns _str_ with leading and trailing whitespace removed `
` Returns null when _str_ evaluates to null | -| size(_str_) | Returns the number of characters in _str_ `
` Returns null when _str_ evaluates to null | -| [intern(_str_)](#intern) | Returns a deduplicated, memory-efficient representation of _str_ `
` Returns null when _str_ evaluates to null | - -## Point functions - -| Function | Description| -| ---------------------------- | :----------| -| [point(_map_)](#point) | Returns a Point representing a lat/lon coordinate pair | -| distance(_point1_, _point2_) | Returns the distance in meters between the two given points `
` Returns null when either evaluates to null | - -## Type conversion functions - -|Function | Description| -| --------------------------- |:-----------| -| toBoolean(_expr_) | Returns a Boolean when _expr_ evaluates to a Boolean `
` Converts a string to Boolean (`"true"` (case insensitive) to true, `"false"` (case insensitive) to false, any other value to null) `
` Converts an integer to Boolean (0 to `false`, any other value to `true`) `
` Returns null when _expr_ evaluates to null `
` Emit an error on other types | -| toBooleanList(_exprList_) | Converts a list to a list of Booleans. Each element in the list is converted using toBooleanOrNull() | -| toBooleanOrNull(_expr_) | Returns a Boolean when _expr_ evaluates to a Boolean `
` Converts a string to Boolean (`"true"` (case insensitive) to true, `"false"` (case insensitive) to false, any other value to null) `
` Converts an integer to Boolean (0 to `false`, any other value to `true`) `
` Returns null when _expr_ evaluates to null `
` Returns null for other types | -| toFloat(_expr_) | Returns a floating point when _expr_ evaluates to a floating point `
` Converts an integer to a floating point `
` Converts a string to a floating point or null `
` Returns null when _expr_ evaluates to null `
` Emit an error on other types | -| toFloatList(_exprList_) | Converts a list to a list of floating points. Each element in the list is converted using toFloatOrNull() | -| toFloatOrNull(_expr_) | Returns a floating point when _expr_ evaluates to a floating point `
` Converts an integer to a floating point `
` Converts a string to a floating point or null `
` Returns null when _expr_ evaluates to null `
` Returns null for other types | -| toInteger(_expr_) * | Returns an integer when _expr_ evaluates to an integer `
` Converts a floating point to integer `
` Converts a string to an integer or null `
` Converts a Boolean to an integer (false to 0, true to 1). Returns null when _expr_ evaluates to null `
` Emit an error on other types | -| toIntegerList(_exprList_) * | Converts a list to a list of integer values. Each element in the list is converted using toIntegerOrNull() | -| toIntegerOrNull(_expr_) * | Returns an integer when _expr_ evaluates to an integer `
` Converts a floating point to integer `
` Converts a string to an integer or null `
` Converts a Boolean to an integer (false to 0, true to 1). Returns null when _expr_ evaluates to null `
` Returns null for other types | -| toString(_expr_) | Returns a string when _expr_ evaluates to a string `
` Converts an integer, float, Boolean, string, or point to a string representation `
` Returns null when _expr_ evaluates to null `
` Emit an error on other types | -| toStringList(_exprList_) | Converts a list to a list of strings. Each element in the list is converted using toStringOrNull() | -| toStringOrNull(_expr_) | Returns a string when _expr_ evaluates to a string `
` Converts an integer, float, Boolean, string, or point to a string representation `
` Returns null when _expr_ evaluates to null `
` Returns null for other types | - -* FalkorDB-specific behavior: rounding method when converting a floating point to an integer is "toward negative infinity (floor)" - -## Node functions - -|Function | Description| -| ------------ |:-----------| -|indegree(_node_ [, _reltype_ ...]) * `
` indegree(_node_ [, _reltypeList_]) * | When no relationship types are specified: Returns the number of _node_'s incoming edges `
` When one or more relationship types are specified: Returns the number of _node's_ incoming edges with one of the given relationship types `
` Returns null when _node_ evaluates to null | -|outdegree(_node_ [, _reltype_ ...]) * `
` outdegree(_node_ [, _reltypeList_]) * | When no relationship types are specified: Returns the number of _node_'s outgoing edges `
` When one or more relationship types are specified: Returns the number of _node's_ outgoing edges with one of the given relationship types `
` Returns null when _node_ evaluates to null | - -* FalkorDB-specific extensions to Cypher - -## Path functions - -| Function | Description| -| ----------------------------------------------| :----------| -| nodes(_path_) | Returns a list containing all the nodes in _path_ `
` Returns null if _path_ evaluates to null | -| relationships(_path_) | Returns a list containing all the relationships in _path_ `
` Returns null if _path_ evaluates to null | -| length(_path_) | Returns the length (number of edges) of _path_ `
` Returns null if _path_ evaluates to null | -| [shortestPath(...)](#about-path-functions) * | Return the shortest path that resolves the given pattern | -| [allShortestPaths(...)](#about-path-functions) * | Returns all the shortest paths between a pair of entities - -* FalkorDB-specific extensions to Cypher - - -## Vector functions - -| Function | Description| -| ----------------------------------------- | :----------| -| vecf32(_array_) | Creates a new float 32 vector `
` all elements of input array must be of type float | -| vec.euclideanDistance(_vector_, _vector_) | Returns the Euclidean distance between the two input vectors | -| vec.cosineDistance(_vector_, _vector_) | Returns the Cosine distance between the two input vectors | - -### List comprehensions - -List comprehensions are a syntactical construct that accepts an array and produces another based on the provided map and filter directives. - -They are a common construct in functional languages and modern high-level languages. In Cypher, they use the syntax: - -```sh -[element IN array WHERE condition | output elem] -``` - -* `array` can be any expression that produces an array: a literal, a property reference, or a function call. -* `WHERE condition` is an optional argument to only project elements that pass a certain criteria. If omitted, all elements in the array will be represented in the output. -* `| output elem` is an optional argument that allows elements to be transformed in the output array. If omitted, the output elements will be the same as their corresponding inputs. - -The following query collects all paths of any length, then for each produces an array containing the `name` property of every node with a `rank` property greater than 10: - -```cypher -MATCH p=()-[*]->() RETURN [node IN nodes(p) WHERE node.rank > 10 | node.name] -``` - -#### Existential comprehension functions - -The functions `any()`, `all()`, `single()` and `none()` use a simplified form of the list comprehension syntax and return a boolean value. - -```cypher -any(element IN array WHERE condition) -``` - -They can operate on any form of input array, but are particularly useful for path filtering. The following query collects all paths of any length in which all traversed edges have a weight less than 3: - -```sh -MATCH p=()-[*]->() WHERE all(edge IN relationships(p) WHERE edge.weight < 3) RETURN p -``` - -### Pattern comprehensions - -Pattern comprehensions are a method of producing a list composed of values found by performing the traversal of a given graph pattern. - -The following query returns the name of a `Person` node and a list of all their friends' ages: - -```cypher -MATCH (n:Person) -RETURN -n.name, -[(n)-[:FRIEND_OF]->(f:Person) | f.age] -``` - -Optionally, a `WHERE` clause may be embedded in the pattern comprehension to filter results. In this query, all friends' ages will be gathered for friendships that started before 2010: - -```cypher -MATCH (n:Person) -RETURN -n.name, -[(n)-[e:FRIEND_OF]->(f:Person) WHERE e.since < 2010 | f.age] -``` - -### CASE WHEN - -The case statement comes in two variants. Both accept an input argument and evaluates it against one or more expressions. The first `WHEN` argument that specifies a value matching the result will be accepted, and the value specified by the corresponding `THEN` keyword will be returned. - -Optionally, an `ELSE` argument may also be specified to indicate what to do if none of the `WHEN` arguments match successfully. - -In its simple form, there is only one expression to evaluate and it immediately follows the `CASE` keyword: - -```cypher -MATCH (n) -RETURN -CASE n.title -WHEN 'Engineer' THEN 100 -WHEN 'Scientist' THEN 80 -ELSE n.privileges -END -``` - -In its generic form, no expression follows the `CASE` keyword. 
Instead, each `WHEN` statement specifies its own expression: - -```cypher -MATCH (n) -RETURN -CASE -WHEN n.age < 18 THEN '0-18' -WHEN n.age < 30 THEN '18-30' -ELSE '30+' -END -``` - -#### Reduce - -The `reduce()` function accepts a starting value and updates it by evaluating an expression against each element of the list: - -```cypher -RETURN reduce(sum = 0, n IN [1,2,3] | sum + n) -``` - -`sum` will successively have the values 0, 1, 3, and 6, with 6 being the output of the function call. - -### Intern - -The `intern()` function expects a single string argument: - -```cypher -"CREATE (:A {v:intern('VERY LONG STRING')})" -``` - -This function deduplicates the input string by storing a single internal copy across the database. -It is especially useful for repeated string valuesβ€”like country names, email domains, or tagsβ€”in large graphs. -Interned strings can be stored as node or relationship properties, and behave identically to regular strings in queries, -with the added benefit of reduced memory usage. - -### Point - -The `point()` function expects one map argument of the form: - -```cypher -RETURN point({latitude: lat_value, longitude: lon_val}) -``` - -The key names `latitude` and `longitude` are case-sensitive. - -The point constructed by this function can be saved as a node/relationship property or used within the query, such as in a `distance` function call. - -### About Path Functions - -The following graph: - -![Road network](/img/road_network.png) - -represents a road network with 7 cities (A, B, C, and so on) and 11 one-way roads. Each road has a distance (say, in kilometers) and trip time (say, in minutes). - - -#### shortestPath - -`shortestPath` returns one of the shortest paths. If there is more than one, only one is retrieved. - -The sole `shortestPath` argument is a traversal pattern. This pattern's endpoints must be resolved prior to the function call, and no property filters may be introduced in the pattern. The relationship pattern may specify any number of relationship types (including zero) to be considered. If a minimum number of edges to traverse is specified, it may only be 0 or 1, while any number may be used for the maximum. If 0 is specified as the minimum, the source node will be included in the returned path. If no shortest path can be found, NULL is returned. - -Example Usage: Find the shortest path (by number of roads) from A to G - -```bash -$ GRAPH.QUERY g "MATCH (a:City{name:'A'}),(g:City{name:'G'}) WITH shortestPath((a)-[*]->(g)) as p RETURN length(p), [n in nodes(p) | n.name] as pathNodes" -1) 1) "length(p)" - 2) "pathNodes" -2) 1) 1) (integer) 3 - 2) "[A, D, F, G]" -``` - -![Road network](/img/graph_query_road.png) - -#### allShortestPaths - -All `allShortestPaths` results have, by definition, the same length (number of roads). - -Examples Usage: Find all the shortest paths (by number of roads) from A to G - -```bash -$ GRAPH.QUERY g "MATCH (a:City{name:'A'}),(g:City{name:'G'}) WITH a,g MATCH p=allShortestPaths((a)-[*]->(g)) RETURN length(p), [n in nodes(p) | n.name] as pathNodes" -1) 1) "length(p)" - 2) "pathNodes" -2) 1) 1) (integer) 3 - 2) "[A, D, F, G]" - 2) 1) (integer) 3 - 2) "[A, C, F, G]" - 3) 1) (integer) 3 - 2) "[A, D, E, G]" - 4) 1) (integer) 3 - 2) "[A, B, E, G]" -``` - -Using the unbounded traversal pattern `(a:City{name:'A'})-[*]->(g:City{name:'G'})`, FalkorDB traverses all possible paths from A to G. `ORDER BY length(p) LIMIT 5` ensures that you collect only [up to 5 shortest paths (minimal number of relationships). 
This approach is very inefficient because all possible paths would have to be traversed. Ideally, you would want to abort some traversals as soon as you are sure they would not result in the discovery of shorter paths. - -### JSON format - -`toJSON()` returns the input value in JSON formatting. For primitive data types and arrays, this conversion is conventional. Maps and map projections (`toJSON(node { .prop} )`) are converted to JSON objects, as are nodes and relationships. - -The format for a node object in JSON is: - -```sh -{ - "type": "node", - "id": id(int), - "labels": [label(string) X N], - "properties": { - property_key(string): property_value X N - } -} -``` - -The format for a relationship object in JSON is: - -```sh -{ - "type": "relationship", - "id": id(int), - "relationship": type(string), - "properties": { - property_key(string): property_value X N - } - "start": src_node(node), - "end": dest_node(node) -} -``` - -### Variable length traverse filtering - -Consider a logistics network where: - -* Nodes (Warehouse) represent distribution centers. -* Edges (Shipment) represent routes where packages are shipped. -* Each shipment has an increasing priority level. - -Imagine a package tracking system where deliveries follow a priority-based routing: - -* Each shipment (Shipment) has a priority value (s.priority). -* We want to ensure that package priority never decreases as it moves through the network. -* The query filters paths where the previous shipment (prev(s.priority)) has a lower or equal priority than the current one (s.priority). - -```cypher -MATCH p=(:Warehouse)-[s:Shipment]->(:Warehouse) -WHERE coalesce(prev(s.priority), s.priority) <= s.priority -RETURN p -``` - -* `MATCH p=(:Warehouse)-[s:Shipment]->(:Warehouse)` - Finds shipment paths between warehouses. -* `WHERE coalesce(prev(s.priority)) <= s.priority` - Ensures that priority never decreases along the route. -* `RETURN p` - Returns valid paths where shipments maintain or increase priority. diff --git a/website/docs/cypher/index.md b/website/docs/cypher/index.md deleted file mode 100644 index 67e4553..0000000 --- a/website/docs/cypher/index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Cypher Language -description: > -sidebar_position: 7 -sidebar_label: Cypher Language ---- - - -# Clauses - -A Cypher query consists of one or more clauses. - -- [MATCH](/cypher/match) -- [OPTIONAL MATCH](/cypher/optional-match) -- [WHERE](/cypher/where) -- [RETURN](/cypher/return) -- [ORDER BY](/cypher/order-by) -- [SKIP](/cypher/skip) -- [LIMIT](/cypher/limit) -- [CREATE](/cypher/create) -- [MERGE](/cypher/merge) -- [DELETE](/cypher/delete) -- [REMOVE](/cypher/remove) -- [SET](/cypher/set) -- [WITH](/cypher/with) -- [UNION](/cypher/union) -- [UNWIND](/cypher/unwind) -- [FOREACH](/cypher/foreach) -- [CALL {}](/cypher/call) - -## Functions - -See the list of available [functions](/cypher/functions). - -## Procedures - -See the list of available [procedures](/cypher/procedures). - -## Algorithms - -See the list of available graph [algorithms](/algorithms). - -## Indexing - -See how to use [indexing](/cypher/indexing/). 
diff --git a/website/docs/cypher/indexing/fulltext-index.mdx b/website/docs/cypher/indexing/fulltext-index.mdx deleted file mode 100644 index 7008c18..0000000 --- a/website/docs/cypher/indexing/fulltext-index.mdx +++ /dev/null @@ -1,1001 +0,0 @@ ---- -title: Full-text Index -description: Full-text indexing using RediSearch capabilities -sidebar_position: 2 -sidebar_label: Full-text Index ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# Full-text indexing - -FalkorDB leverages the indexing capabilities of [RediSearch](https://redis.io/docs/interact/search-and-query/) to provide full-text indices through procedure calls. - -## Creating a full-text index for a node label - -To construct a full-text index on the `title` property of all nodes with label `Movie`, use the syntax: - - - - - - - - - - - - - - - -```python -graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', 'title')") -``` - - - - -```javascript -await graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', 'title')"); -``` - - - - -```rust -graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', 'title')").execute().await?; -``` - - - - -```java -graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', 'title')"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.createNodeIndex('Movie', 'title')" -``` - - - - - -More properties can be added to this index by adding their names to the above set of arguments, or using this syntax again with the additional names. - - - - - - - - - - - - - - - -```python -graph.query("CALL db.idx.fulltext.createNodeIndex('Person', 'firstName', 'lastName')") -``` - - - - -```javascript -await graph.query("CALL db.idx.fulltext.createNodeIndex('Person', 'firstName', 'lastName')"); -``` - - - - -```rust -graph.query("CALL db.idx.fulltext.createNodeIndex('Person', 'firstName', 'lastName')").execute().await?; -``` - - - - -```java -graph.query("CALL db.idx.fulltext.createNodeIndex('Person', 'firstName', 'lastName')"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.createNodeIndex('Person', 'firstName', 'lastName')" -``` - - - - - -Index configuration options: - -1. Language - Define which language to use for stemming text, which is adding the base form of a word to the index. This allows the query for "going" to also return results for "go" and "gone", for example. -2. Stopwords - These are words that are usually so common that they do not add much information to search, but take up a lot of space and CPU time in the index. 
- -To construct a full-text index on the `title` property using `German` language and using custom stopwords of all nodes with label `Movie`, use the syntax: - - - - - - - - - - - - - - - -```python -graph.query("CALL db.idx.fulltext.createNodeIndex({ label: 'Movie', language: 'German', stopwords: ['a', 'ab'] }, 'title')") -``` - - - - -```javascript -await graph.query("CALL db.idx.fulltext.createNodeIndex({ label: 'Movie', language: 'German', stopwords: ['a', 'ab'] }, 'title')"); -``` - - - - -```rust -graph.query("CALL db.idx.fulltext.createNodeIndex({ label: 'Movie', language: 'German', stopwords: ['a', 'ab'] }, 'title')").execute().await?; -``` - - - - -```java -graph.query("CALL db.idx.fulltext.createNodeIndex({ label: 'Movie', language: 'German', stopwords: ['a', 'ab'] }, 'title')"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.createNodeIndex({ label: 'Movie', language: 'German', stopwords: ['a', 'ab'] }, 'title')" -``` - - - - - -Additional field configuration options: - -1. Weight - The importance of the text in the field -2. Nostem - Skip stemming when indexing text -3. Phonetic - Enable phonetic search on the text - -To construct a full-text index on the `title` property with phonetic search of all nodes with label `Movie`, use the syntax: - - - - - - - - - - - - - - - -```python -graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', {field: 'title', phonetic: 'dm:en'})") -``` - - - - -```javascript -await graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', {field: 'title', phonetic: 'dm:en'})"); -``` - - - - -```rust -graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', {field: 'title', phonetic: 'dm:en'})").execute().await?; -``` - - - - -```java -graph.query("CALL db.idx.fulltext.createNodeIndex('Movie', {field: 'title', phonetic: 'dm:en'})"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.createNodeIndex('Movie', {field: 'title', phonetic: 'dm:en'})" -``` - - - - - -## Query Syntax and Features - -FalkorDB uses [RediSearch query syntax](https://redis.io/docs/latest/develop/ai/search-and-query/advanced-concepts/query_syntax/) which provides powerful search capabilities including fuzzy matching, prefix matching, and tokenization. - -### Tokenization - -When text is indexed, it is automatically tokenized (split into words). By default, text is split on whitespace and punctuation. This allows you to search for individual words within larger text fields. - -For example, if you index a `title` property containing "The Lord of the Rings", you can search for any of the individual words like "Lord" or "Rings". - -### Prefix Matching - -Prefix matching allows you to search for words that start with a specific prefix using the `*` wildcard. This is useful for autocomplete functionality or when you want to match word variations. - - - - - - - - - - - - - - - -```python -# Find all movies with titles containing words starting with "Jun" -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*') YIELD node RETURN node.title") -for record in result: - print(record["node.title"]) -# This would match "Jungle", "June", "Junior", etc. -``` - - - - -```javascript -// Find all movies with titles containing words starting with "Jun" -const result = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*') YIELD node RETURN node.title"); -for (const record of result.data) { - console.log(record["node.title"]); -} -// This would match "Jungle", "June", "Junior", etc. 
-``` - - - - -```rust -// Find all movies with titles containing words starting with "Jun" -let result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*') YIELD node RETURN node.title").execute().await?; -for record in result.data() { - println!("{}", record["node.title"]); -} -// This would match "Jungle", "June", "Junior", etc. -``` - - - - -```java -// Find all movies with titles containing words starting with "Jun" -ResultSet result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*') YIELD node RETURN node.title"); -for (Record record : result) { - System.out.println(record.get("node.title")); -} -// This would match "Jungle", "June", "Junior", etc. -``` - - - - -```bash -# Find all movies with titles containing words starting with "Jun" -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', 'Jun*') YIELD node RETURN node.title" -# This would match "Jungle", "June", "Junior", etc. -``` - - - - - -**Note:** Prefix matching only works at the end of a word (e.g., `Jun*`). The wildcard must appear at the end of the search term. - -### Fuzzy Matching - -Fuzzy matching allows you to find words that are similar to your search term, accounting for typos and spelling variations. Use the `%` symbol followed by the Levenshtein distance (number of character changes allowed). - - - - - - - - - - - - - - - -```python -# Find movies with titles containing words similar to "Jangle" (allowing 1 character difference) -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jangle%1') YIELD node RETURN node.title") -for record in result: - print(record["node.title"]) -# This would match "Jungle" (1 character different) - -# Allow up to 2 character differences -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jngle%2') YIELD node RETURN node.title") -# This would also match "Jungle" (1 character missing) -``` - - - - -```javascript -// Find movies with titles containing words similar to "Jangle" (allowing 1 character difference) -const result = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jangle%1') YIELD node RETURN node.title"); -for (const record of result.data) { - console.log(record["node.title"]); -} -// This would match "Jungle" (1 character different) - -// Allow up to 2 character differences -const result2 = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jngle%2') YIELD node RETURN node.title"); -// This would also match "Jungle" (1 character missing) -``` - - - - -```rust -// Find movies with titles containing words similar to "Jangle" (allowing 1 character difference) -let result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jangle%1') YIELD node RETURN node.title").execute().await?; -for record in result.data() { - println!("{}", record["node.title"]); -} -// This would match "Jungle" (1 character different) - -// Allow up to 2 character differences -let result2 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jngle%2') YIELD node RETURN node.title").execute().await?; -// This would also match "Jungle" (1 character missing) -``` - - - - -```java -// Find movies with titles containing words similar to "Jangle" (allowing 1 character difference) -ResultSet result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', '%Jangle%1') YIELD node RETURN node.title"); -for (Record record : result) { - System.out.println(record.get("node.title")); -} -// This would match "Jungle" (1 character different) - -// Allow up to 2 character differences -ResultSet result2 = graph.query("CALL 
db.idx.fulltext.queryNodes('Movie', '%Jngle%2') YIELD node RETURN node.title"); -// This would also match "Jungle" (1 character missing) -``` - - - - -```bash -# Find movies with titles containing words similar to "Jangle" (allowing 1 character difference) -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', '%Jangle%1') YIELD node RETURN node.title" -# This would match "Jungle" (1 character different) - -# Allow up to 2 character differences -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', '%Jngle%2') YIELD node RETURN node.title" -# This would also match "Jungle" (1 character missing) -``` - - - - - -**Fuzzy matching syntax:** `%term%distance` where: -- `term` is the word to match -- `distance` is the maximum Levenshtein distance (1-3, default is 1 if not specified) - -**Note:** Fuzzy matching is computationally more expensive than exact or prefix matching, so use it judiciously on large datasets. - -### Combining Query Features - -You can combine multiple search terms using boolean operators: - -- `AND` (or space): All terms must match -- `OR` (`|`): At least one term must match -- `NOT` (`-`): Term must not be present - - - - - - - - - - - - - - - -```python -# Find movies with "Jungle" AND "Book" in the title -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle Book') YIELD node RETURN node.title") - -# Find movies with "Jungle" OR "Forest" in the title -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle|Forest') YIELD node RETURN node.title") - -# Find movies with "Book" but NOT "Jungle" -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book -Jungle') YIELD node RETURN node.title") - -# Combine prefix and fuzzy matching -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*|%Forst%1') YIELD node RETURN node.title") -``` - - - - -```javascript -// Find movies with "Jungle" AND "Book" in the title -const result = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle Book') YIELD node RETURN node.title"); - -// Find movies with "Jungle" OR "Forest" in the title -const result2 = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle|Forest') YIELD node RETURN node.title"); - -// Find movies with "Book" but NOT "Jungle" -const result3 = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book -Jungle') YIELD node RETURN node.title"); - -// Combine prefix and fuzzy matching -const result4 = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*|%Forst%1') YIELD node RETURN node.title"); -``` - - - - -```rust -// Find movies with "Jungle" AND "Book" in the title -let result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle Book') YIELD node RETURN node.title").execute().await?; - -// Find movies with "Jungle" OR "Forest" in the title -let result2 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle|Forest') YIELD node RETURN node.title").execute().await?; - -// Find movies with "Book" but NOT "Jungle" -let result3 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book -Jungle') YIELD node RETURN node.title").execute().await?; - -// Combine prefix and fuzzy matching -let result4 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*|%Forst%1') YIELD node RETURN node.title").execute().await?; -``` - - - - -```java -// Find movies with "Jungle" AND "Book" in the title -ResultSet result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle Book') YIELD node RETURN node.title"); - -// Find movies with 
"Jungle" OR "Forest" in the title -ResultSet result2 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jungle|Forest') YIELD node RETURN node.title"); - -// Find movies with "Book" but NOT "Jungle" -ResultSet result3 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book -Jungle') YIELD node RETURN node.title"); - -// Combine prefix and fuzzy matching -ResultSet result4 = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Jun*|%Forst%1') YIELD node RETURN node.title"); -``` - - - - -```bash -# Find movies with "Jungle" AND "Book" in the title -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', 'Jungle Book') YIELD node RETURN node.title" - -# Find movies with "Jungle" OR "Forest" in the title -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', 'Jungle|Forest') YIELD node RETURN node.title" - -# Find movies with "Book" but NOT "Jungle" -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', 'Book -Jungle') YIELD node RETURN node.title" - -# Combine prefix and fuzzy matching: Find "Jun*" OR words similar to "Forst" -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', 'Jun*|%Forst%1') YIELD node RETURN node.title" -``` - - - - - -For more advanced query syntax features, see the [RediSearch query syntax documentation](https://redis.io/docs/latest/develop/ai/search-and-query/advanced-concepts/query_syntax/). - -## Utilizing a full-text index for a node label - -An index can be invoked to match any whole words contained within: - - - - - - - - - - - - - - - -```python -result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node.title") -for record in result: - print(record["node.title"]) -# Output: -# The Jungle Book -# The Book of Life -``` - - - - -```javascript -const result = await graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node.title"); -for (const record of result.data) { - console.log(record["node.title"]); -} -// Output: -// The Jungle Book -// The Book of Life -``` - - - - -```rust -let result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node.title").execute().await?; -for record in result.data() { - println!("{}", record["node.title"]); -} -// Output: -// The Jungle Book -// The Book of Life -``` - - - - -```java -ResultSet result = graph.query("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node.title"); -for (Record record : result) { - System.out.println(record.get("node.title")); -} -// Output: -// The Jungle Book -// The Book of Life -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH -"CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node.title" -1) 1) "node.title" -2) 1) 1) "The Jungle Book" - 2) 1) "The Book of Life" -3) 1) "Query internal execution time: 0.927409 milliseconds" -``` - - - - - -This CALL clause can be interleaved with other Cypher clauses to perform more elaborate manipulations: - -```sh -GRAPH.QUERY DEMO_GRAPH -"CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node AS m -WHERE m.genre = 'Adventure' -RETURN m ORDER BY m.rating" -1) 1) "m" -2) 1) 1) 1) 1) "id" - 2) (integer) 1168 - 2) 1) "labels" - 2) 1) "Movie" - 3) 1) "properties" - 2) 1) 1) "genre" - 2) "Adventure" - 2) 1) "rating" - 2) "7.6" - 3) 1) "votes" - 2) (integer) 151342 - 4) 1) "year" - 2) (integer) 2016 - 5) 1) "title" - 2) "The Jungle Book" -3) 1) "Query internal execution time: 0.226914 milliseconds" -``` - -In addition to yielding matching nodes, full-text index scans will return the 
score of each node. This is the [TF-IDF](https://redis.io/docs/interact/search-and-query/advanced-concepts/scoring/#tfidf-default) score of the node, which is informed by how many times the search terms appear in the node and how closely grouped they are. This can be observed in the example: - -```sh -GRAPH.QUERY DEMO_GRAPH -"CALL db.idx.fulltext.queryNodes('Node', 'hello world') YIELD node, score RETURN score, node.val" -1) 1) "score" - 2) "node.val" -2) 1) 1) "2" - 2) "hello world" - 2) 1) "1" - 2) "hello to a different world" -3) 1) "Cached execution: 1" - 2) "Query internal execution time: 0.335401 milliseconds" -``` - -## Deleting a full-text index for a node label - -For a node label, the full-text index deletion syntax is: - - - - - - - - - - - - - - - -```python -graph.query("CALL db.idx.fulltext.drop('Movie')") -``` - - - - -```javascript -await graph.query("CALL db.idx.fulltext.drop('Movie')"); -``` - - - - -```rust -graph.query("CALL db.idx.fulltext.drop('Movie')").execute().await?; -``` - - - - -```java -graph.query("CALL db.idx.fulltext.drop('Movie')"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.fulltext.drop('Movie')" -``` - - - - - -## Creating Full-Text indexing for Relation Labels -To create a full-text index on the name property of all relations with the label Manager and enable phonetic search, use the following syntax: - - - - - - - - - - - - - - - -```python -graph.query("CREATE FULLTEXT INDEX FOR ()-[m:Manager]-() on (m.name)") -``` - - - - -```javascript -await graph.query("CREATE FULLTEXT INDEX FOR ()-[m:Manager]-() on (m.name)"); -``` - - - - -```rust -graph.query("CREATE FULLTEXT INDEX FOR ()-[m:Manager]-() on (m.name)").execute().await?; -``` - - - - -```java -graph.query("CREATE FULLTEXT INDEX FOR ()-[m:Manager]-() on (m.name)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CREATE FULLTEXT INDEX FOR ()-[m:Manager]-() on (m.name)" -``` - - - - -## Querying with a Full-Text Index -To search for specific words within the indexed relations, use: - - - - - - - - - - - - - - - -```python -result = graph.query("CALL db.idx.fulltext.queryRelationships('Manager', 'Charlie Munger') YIELD relationship RETURN relationship.name") -``` - - - - -```javascript -const result = await graph.query("CALL db.idx.fulltext.queryRelationships('Manager', 'Charlie Munger') YIELD relationship RETURN relationship.name"); -``` - - - - -```rust -let result = graph.query("CALL db.idx.fulltext.queryRelationships('Manager', 'Charlie Munger') YIELD relationship RETURN relationship.name").execute().await?; -``` - - - - -```java -ResultSet result = graph.query("CALL db.idx.fulltext.queryRelationships('Manager', 'Charlie Munger') YIELD relationship RETURN relationship.name"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH -"CALL db.idx.fulltext.queryRelationships('Manager', 'Charlie Munger') YIELD relationship RETURN relationship.name" -``` - - - - - -## Deleting a Full-Text Index -To delete the full-text index for a specific relation label, use: - - - - - - - - - - - - - - - -```python -graph.query("DROP FULLTEXT INDEX FOR ()-[m:Manager]-() ON (m.name)") -``` - - - - -```javascript -await graph.query("DROP FULLTEXT INDEX FOR ()-[m:Manager]-() ON (m.name)"); -``` - - - - -```rust -graph.query("DROP FULLTEXT INDEX FOR ()-[m:Manager]-() ON (m.name)").execute().await?; -``` - - - - -```java -graph.query("DROP FULLTEXT INDEX FOR ()-[m:Manager]-() ON (m.name)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "DROP FULLTEXT INDEX FOR ()-[m:Manager]-() ON (m.name)" -``` - - - - - -## Index 
Management - -### Listing Full-text Indexes - -To view all indexes (including full-text) in your graph, use: - -```cypher -CALL db.indexes() -``` - -This returns information about all indexes, with full-text indexes marked with type `FULLTEXT`. - -## Performance Tradeoffs and Best Practices - -### When to Use Full-text Indexes - -Full-text indexes are ideal for: -- **Text-heavy search**: Searching within large text fields like descriptions, articles, or comments -- **Partial word matching**: When users might not know the exact text -- **Fuzzy search**: Handling typos and spelling variations -- **Multi-word queries**: Searching for multiple terms with boolean logic - -### When NOT to Use Full-text Indexes - -Full-text indexes are not optimal for: -- **Exact numeric filtering**: Use range indexes instead for numeric comparisons -- **Exact-match queries**: Range indexes are more efficient for exact property matches -- **Small or structured data**: For short, well-defined strings, range indexes may be sufficient - -### Performance Considerations - -**Benefits:** -- Enables sophisticated text search capabilities (fuzzy, prefix, phonetic) -- Supports stemming and language-specific optimizations -- Returns relevance scores (TF-IDF) for ranking results - -**Costs:** -- **Write overhead**: Text must be tokenized and indexed on write -- **Storage**: Requires more space than range indexes due to tokenization and inverted indices -- **Configuration complexity**: Language, stopwords, and stemming settings affect results -- **Query performance**: Fuzzy matching is more expensive than exact matching - -**Recommendations:** -- Choose the correct language setting for proper stemming -- Configure appropriate stopwords for your use case -- Use prefix matching (`*`) for autocomplete rather than full fuzzy search when possible -- Test query performance with realistic data volumes -- Consider the tradeoff between index configurability and query performance - -### Configuration Best Practices - -**Language Selection:** -- Wrong language settings can produce poor stemming results -- Example: Searching "running" with English stemming finds "run", but German stemming won't - -**Stopwords:** -- Default stopwords are optimized for general text -- Customize stopwords for domain-specific applications (e.g., legal, medical, technical documents) -- Too many stopwords can hurt precision; too few increase index size - -**Phonetic Search:** -- Useful for name searches and when spelling variations are common -- Increases index size and query time -- Double Metaphone (`dm:en`) is recommended for English - -## Verifying Full-text Index Usage - -Use `GRAPH.EXPLAIN` to verify that full-text queries use the index: - - - - - - - - - - - - - - - -```python -# Check if full-text index is used -result = graph.explain("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node") -print(result) -# Output shows: ProcedureCall | db.idx.fulltext.queryNodes -``` - - - - -```javascript -// Check if full-text index is used -const result = await graph.explain("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node"); -console.log(result); -// Output shows: ProcedureCall | db.idx.fulltext.queryNodes -``` - - - - -```rust -// Check if full-text index is used -let result = graph.explain("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node").execute().await?; -println!("{}", result); -// Output shows: ProcedureCall | db.idx.fulltext.queryNodes -``` - - - - -```java -// Check if full-text index is 
used -String result = graph.explain("CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node"); -System.out.println(result); -// Output shows: ProcedureCall | db.idx.fulltext.queryNodes -``` - - - - -```bash -# Check if full-text index is used -GRAPH.EXPLAIN DEMO_GRAPH "CALL db.idx.fulltext.queryNodes('Movie', 'Book') YIELD node RETURN node" -# Output shows: ProcedureCall | db.idx.fulltext.queryNodes -``` - - - diff --git a/website/docs/cypher/indexing/index.md b/website/docs/cypher/indexing/index.md deleted file mode 100644 index 31fd522..0000000 --- a/website/docs/cypher/indexing/index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Indexing -description: > -sidebar_position: 21 -sidebar_label: Indexing ---- - -# Indexing - -FalkorDB provides multiple types of indexes to optimize query performance and enable efficient data retrieval. Each index type is designed for specific use cases and data patterns. - -## Index Types - -FalkorDB supports the following index types: - -### [Range Index](./range-index) - -Range indexes support single-property indexes for node labels and relationship types. String, numeric, and geospatial data types can be indexed. These indexes automatically optimize queries with filters on indexed properties. - -### [Full-text Index](./fulltext-index) - -Full-text indexes leverage RediSearch capabilities to provide powerful text search functionality. They support features like stemming, stopwords, phonetic search, and scoring based on TF-IDF. - -### [Vector Index](./vector-index) - -Vector indexes enable similarity search on vector embeddings. These indexes are essential for AI and machine learning applications, supporting operations like nearest neighbor search with configurable similarity functions (euclidean or cosine). - ---- - -Choose an index type from the navigation menu to learn more about creating, querying, and managing that specific type of index. diff --git a/website/docs/cypher/indexing/range-index.mdx b/website/docs/cypher/indexing/range-index.mdx deleted file mode 100644 index 594e8cd..0000000 --- a/website/docs/cypher/indexing/range-index.mdx +++ /dev/null @@ -1,798 +0,0 @@ ---- -title: Range Index -description: Single-property indexes for node labels and relationship types -sidebar_position: 1 -sidebar_label: Range Index ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# Range Index - -FalkorDB supports single-property indexes for node labels and for relationship type. String, numeric, and geospatial data types can be indexed. - -## Supported Data Types - -Range indexes support the following data types: -- **String**: Text values for exact matching and range queries -- **Numeric**: Integer and floating-point numbers for range comparisons -- **Geospatial**: Point data types for location-based queries -- **Arrays**: Single-property arrays containing scalar values (integers, floats, strings) - -**Note**: Complex types like nested arrays, maps, or vectors are not supported for range indexing. 
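As a quick illustration of the indexable types listed above, here is a minimal sketch that creates one node carrying a string, a numeric, a geospatial point, and a scalar-array property; the `Person` label and the property names are illustrative only, and each of these properties could then be covered by its own range index.

```bash
# Hypothetical node whose properties are all range-indexable:
# a string, a number, a geospatial point, and an array of scalars
GRAPH.QUERY DEMO_GRAPH "CREATE (:Person {name: 'Alice', age: 34, location: point({latitude: 41.4045886, longitude: -75.6969532}), samples: [1, 2.5, 3]})"
```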
- -## Creating an index for a node label - -For a node label, the index creation syntax is: - - - - - - - - - - - - - - - -```python -graph.query("CREATE INDEX FOR (p:Person) ON (p.age)") -``` - - - - -```javascript -await graph.query("CREATE INDEX FOR (p:Person) ON (p.age)"); -``` - - - - -```rust -graph.query("CREATE INDEX FOR (p:Person) ON (p.age)").execute().await?; -``` - - - - -```java -graph.query("CREATE INDEX FOR (p:Person) ON (p.age)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CREATE INDEX FOR (p:Person) ON (p.age)" -``` - - - - - -An old syntax is also supported: - - - - - - - - - - - - - - - -```python -graph.query("CREATE INDEX ON :Person(age)") -``` - - - - -```javascript -await graph.query("CREATE INDEX ON :Person(age)"); -``` - - - - -```rust -graph.query("CREATE INDEX ON :Person(age)").execute().await?; -``` - - - - -```java -graph.query("CREATE INDEX ON :Person(age)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CREATE INDEX ON :Person(age)" -``` - - - - - -After an index is explicitly created, it will automatically be used by queries that reference that label and any indexed property in a filter. - - - - - - - - - - - - - - - -```python -result = graph.explain("MATCH (p:Person) WHERE p.age > 80 RETURN p") -print(result) -# Output: -# Results -# Project -# Index Scan | (p:Person) -``` - - - - -```javascript -const result = await graph.explain("MATCH (p:Person) WHERE p.age > 80 RETURN p"); -console.log(result); -// Output: -// Results -// Project -// Index Scan | (p:Person) -``` - - - - -```rust -let result = graph.explain("MATCH (p:Person) WHERE p.age > 80 RETURN p").execute().await?; -println!("{}", result); -// Output: -// Results -// Project -// Index Scan | (p:Person) -``` - - - - -```java -String result = graph.explain("MATCH (p:Person) WHERE p.age > 80 RETURN p"); -System.out.println(result); -// Output: -// Results -// Project -// Index Scan | (p:Person) -``` - - - - -```bash -GRAPH.EXPLAIN DEMO_GRAPH "MATCH (p:Person) WHERE p.age > 80 RETURN p" -1) "Results" -2) " Project" -3) " Index Scan | (p:Person)" -``` - - - - - -This can significantly improve the runtime of queries with very specific filters. 
An index on `:employer(name)`, for example, will dramatically benefit the query: - - - - - - - - - - - - - - - -```python -result = graph.query("MATCH (:Employer {name: 'Dunder Mifflin'})-[:EMPLOYS]->(p:Person) RETURN p") -``` - - - - -```javascript -const result = await graph.query("MATCH (:Employer {name: 'Dunder Mifflin'})-[:EMPLOYS]->(p:Person) RETURN p"); -``` - - - - -```rust -let result = graph.query("MATCH (:Employer {name: 'Dunder Mifflin'})-[:EMPLOYS]->(p:Person) RETURN p").execute().await?; -``` - - - - -```java -ResultSet result = graph.query("MATCH (:Employer {name: 'Dunder Mifflin'})-[:EMPLOYS]->(p:Person) RETURN p"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH -"MATCH (:Employer {name: 'Dunder Mifflin'})-[:EMPLOYS]->(p:Person) RETURN p" -``` - - - - - -An example of utilizing a geospatial index to find `Employer` nodes within 5 kilometers of Scranton are: - - - - - - - - - - - - - - - -```python -result = graph.query("WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) < 5000 RETURN e") -``` - - - - -```javascript -const result = await graph.query("WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) < 5000 RETURN e"); -``` - - - - -```rust -let result = graph.query("WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) < 5000 RETURN e").execute().await?; -``` - - - - -```java -ResultSet result = graph.query("WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) < 5000 RETURN e"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH -"WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) < 5000 RETURN e" -``` - - - - - -Geospatial indexes can currently only be leveraged with `<` and `<=` filters; matching nodes outside the given radius are matched using conventional traversal. 
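To see this restriction in practice, `GRAPH.EXPLAIN` can be run on the distance query above. This is a sketch and the exact plan text may vary by version, but a `<` filter on an indexed point property is expected to show an index scan, while the equivalent `>` filter falls back to a conventional label scan.

```bash
# Inside the radius: the '<' filter is expected to use the geospatial index
GRAPH.EXPLAIN DEMO_GRAPH "WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) < 5000 RETURN e"

# Outside the radius: '>' cannot leverage the index, so matching falls back to a conventional scan
GRAPH.EXPLAIN DEMO_GRAPH "WITH point({latitude:41.4045886, longitude:-75.6969532}) AS scranton MATCH (e:Employer) WHERE distance(e.location, scranton) > 5000 RETURN e"
```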
- -## Creating an index for a relationship type - -For a relationship type, the index creation syntax is: - - - - - - - - - - - - - - - -```python -graph.query("CREATE INDEX FOR ()-[f:FOLLOW]-() ON (f.created_at)") -``` - - - - -```javascript -await graph.query("CREATE INDEX FOR ()-[f:FOLLOW]-() ON (f.created_at)"); -``` - - - - -```rust -graph.query("CREATE INDEX FOR ()-[f:FOLLOW]-() ON (f.created_at)").execute().await?; -``` - - - - -```java -graph.query("CREATE INDEX FOR ()-[f:FOLLOW]-() ON (f.created_at)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "CREATE INDEX FOR ()-[f:FOLLOW]-() ON (f.created_at)" -``` - - - - - -Then the execution plan for using the index: - - - - - - - - - - - - - - - -```python -result = graph.explain("MATCH (p:Person {id: 0})-[f:FOLLOW]->(fp) WHERE 0 < f.created_at AND f.created_at < 1000 RETURN fp") -print(result) -# Output: -# Results -# Project -# Edge By Index Scan | [f:FOLLOW] -# Node By Index Scan | (p:Person) -``` - - - - -```javascript -const result = await graph.explain("MATCH (p:Person {id: 0})-[f:FOLLOW]->(fp) WHERE 0 < f.created_at AND f.created_at < 1000 RETURN fp"); -console.log(result); -// Output: -// Results -// Project -// Edge By Index Scan | [f:FOLLOW] -// Node By Index Scan | (p:Person) -``` - - - - -```rust -let result = graph.explain("MATCH (p:Person {id: 0})-[f:FOLLOW]->(fp) WHERE 0 < f.created_at AND f.created_at < 1000 RETURN fp").execute().await?; -println!("{}", result); -// Output: -// Results -// Project -// Edge By Index Scan | [f:FOLLOW] -// Node By Index Scan | (p:Person) -``` - - - - -```java -String result = graph.explain("MATCH (p:Person {id: 0})-[f:FOLLOW]->(fp) WHERE 0 < f.created_at AND f.created_at < 1000 RETURN fp"); -System.out.println(result); -// Output: -// Results -// Project -// Edge By Index Scan | [f:FOLLOW] -// Node By Index Scan | (p:Person) -``` - - - - -```bash -GRAPH.EXPLAIN DEMO_GRAPH "MATCH (p:Person {id: 0})-[f:FOLLOW]->(fp) WHERE 0 < f.created_at AND f.created_at < 1000 RETURN fp" -1) "Results" -2) " Project" -3) " Edge By Index Scan | [f:FOLLOW]" -4) " Node By Index Scan | (p:Person)" -``` - - - - - -This can significantly improve the runtime of queries that traverse super nodes or when we want to start traverse from relationships. - -## Deleting an index for a node label - -For a node label, the index deletion syntax is: - - - - - - - - - - - - - - - -```python -graph.query("DROP INDEX ON :Person(age)") -``` - - - - -```javascript -await graph.query("DROP INDEX ON :Person(age)"); -``` - - - - -```rust -graph.query("DROP INDEX ON :Person(age)").execute().await?; -``` - - - - -```java -graph.query("DROP INDEX ON :Person(age)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "DROP INDEX ON :Person(age)" -``` - - - - - -## Deleting an index for a relationship type - -For a relationship type, the index deletion syntax is: - - - - - - - - - - - - - - - -```python -graph.query("DROP INDEX ON :FOLLOW(created_at)") -``` - - - - -```javascript -await graph.query("DROP INDEX ON :FOLLOW(created_at)"); -``` - - - - -```rust -graph.query("DROP INDEX ON :FOLLOW(created_at)").execute().await?; -``` - - - - -```java -graph.query("DROP INDEX ON :FOLLOW(created_at)"); -``` - - - - -```bash -GRAPH.QUERY DEMO_GRAPH "DROP INDEX ON :FOLLOW(created_at)" -``` - - - - - -## Array Indices - -FalkorDB supports indexing on array properties containing scalar values (e.g., integers, floats, strings), enabling efficient lookups for elements within such arrays. 
- -Note: Complex types like nested arrays, maps, or vectors are not supported for indexing. - -The following example demonstrates how to index and search an array property: - - - - - - - - - - - - - - - -```python -# Create a node with an array property -graph.query("CREATE (:Person {samples: [-21, 30.5, 0, 90, 3.14]})") - -# Create an index on the array property -graph.query("CREATE INDEX FOR (p:Person) ON (p.samples)") - -# Use the index to search for nodes containing a specific value in the array -result = graph.query("MATCH (p:Person) WHERE 90 IN p.samples RETURN p") -``` - - - - -```javascript -// Create a node with an array property -await graph.query("CREATE (:Person {samples: [-21, 30.5, 0, 90, 3.14]})"); - -// Create an index on the array property -await graph.query("CREATE INDEX FOR (p:Person) ON (p.samples)"); - -// Use the index to search for nodes containing a specific value in the array -const result = await graph.query("MATCH (p:Person) WHERE 90 IN p.samples RETURN p"); -``` - - - - -```rust -// Create a node with an array property -graph.query("CREATE (:Person {samples: [-21, 30.5, 0, 90, 3.14]})").execute().await?; - -// Create an index on the array property -graph.query("CREATE INDEX FOR (p:Person) ON (p.samples)").execute().await?; - -// Use the index to search for nodes containing a specific value in the array -let result = graph.query("MATCH (p:Person) WHERE 90 IN p.samples RETURN p").execute().await?; -``` - - - - -```java -// Create a node with an array property -graph.query("CREATE (:Person {samples: [-21, 30.5, 0, 90, 3.14]})"); - -// Create an index on the array property -graph.query("CREATE INDEX FOR (p:Person) ON (p.samples)"); - -// Use the index to search for nodes containing a specific value in the array -ResultSet result = graph.query("MATCH (p:Person) WHERE 90 IN p.samples RETURN p"); -``` - - - - -```bash -# Create a node with an array property -GRAPH.QUERY DEMO_GRAPH "CREATE (:Person {samples: [-21, 30.5, 0, 90, 3.14]})" - -# Create an index on the array property -GRAPH.QUERY DEMO_GRAPH "CREATE INDEX FOR (p:Person) ON (p.samples)" - -# Use the index to search for nodes containing a specific value in the array -GRAPH.QUERY DEMO_GRAPH "MATCH (p:Person) WHERE 90 IN p.samples RETURN p" -``` - - - - - -## Verifying Index Usage - -To verify that an index is being used by your query, use `GRAPH.EXPLAIN` before and after creating the index: - - - - - - - - - - - - - - - -```python -# Before creating the index -result = graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p") -print(result) # Shows: Label Scan | (p:Person) - -# Create the index -graph.query("CREATE INDEX FOR (p:Person) ON (p.age)") - -# After creating the index -result = graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p") -print(result) # Now shows: Index Scan | (p:Person) -``` - - - - -```javascript -// Before creating the index -let result = await graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p"); -console.log(result); // Shows: Label Scan | (p:Person) - -// Create the index -await graph.query("CREATE INDEX FOR (p:Person) ON (p.age)"); - -// After creating the index -result = await graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p"); -console.log(result); // Now shows: Index Scan | (p:Person) -``` - - - - -```rust -// Before creating the index -let result = graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p").execute().await?; -println!("{}", result); // Shows: Label Scan | (p:Person) - -// Create the index -graph.query("CREATE INDEX FOR (p:Person) ON 
(p.age)").execute().await?; - -// After creating the index -let result = graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p").execute().await?; -println!("{}", result); // Now shows: Index Scan | (p:Person) -``` - - - - -```java -// Before creating the index -String result = graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p"); -System.out.println(result); // Shows: Label Scan | (p:Person) - -// Create the index -graph.query("CREATE INDEX FOR (p:Person) ON (p.age)"); - -// After creating the index -result = graph.explain("MATCH (p:Person) WHERE p.age > 30 RETURN p"); -System.out.println(result); // Now shows: Index Scan | (p:Person) -``` - - - - -```bash -# Before creating the index -GRAPH.EXPLAIN DEMO_GRAPH "MATCH (p:Person) WHERE p.age > 30 RETURN p" -# Output shows: Label Scan | (p:Person) - -# Create the index -GRAPH.QUERY DEMO_GRAPH "CREATE INDEX FOR (p:Person) ON (p.age)" - -# After creating the index -GRAPH.EXPLAIN DEMO_GRAPH "MATCH (p:Person) WHERE p.age > 30 RETURN p" -# Output now shows: Index Scan | (p:Person) -``` - - - - - -## Index Management - -### Listing Existing Indexes - -To view all indexes in your graph, use the `db.indexes()` procedure: - -```cypher -CALL db.indexes() -``` - -This returns information about all indexes including their type (RANGE), entity type (node/relationship), labels, and properties. - -## Performance Tradeoffs and Best Practices - -### When to Use Range Indexes - -Range indexes are ideal for: -- **Filtering by specific values**: Queries with equality filters (e.g., `WHERE p.name = 'Alice'`) -- **Range queries**: Numeric or string comparisons (e.g., `WHERE p.age > 30`, `WHERE p.name >= 'A' AND p.name < 'B'`) -- **Geospatial queries**: Finding entities within a certain distance -- **Array membership**: Checking if a value exists in an array property - -### Performance Considerations - -**Benefits:** -- Dramatically improves query performance for filtered searches -- Reduces the number of nodes/relationships that need to be scanned -- Enables efficient range scans and point lookups - -**Costs:** -- **Write overhead**: Every insert or update to an indexed property requires updating the index -- **Storage**: Indexes consume additional memory and disk space -- **Maintenance**: Index structures need to be maintained during graph modifications - -**Recommendations:** -- Index properties that are frequently used in `WHERE` clauses -- Avoid indexing properties that are rarely queried or have high write frequency -- For properties with very few distinct values (low cardinality), indexes may not provide significant benefits -- Monitor query performance with `GRAPH.PROFILE` to validate index effectiveness - -### Example: Profiling Index Performance - -```cypher -# Profile query to see actual execution metrics -GRAPH.PROFILE DEMO_GRAPH "MATCH (p:Person) WHERE p.age > 30 RETURN p" -``` - -This shows detailed timing information and confirms whether the index was used. diff --git a/website/docs/cypher/indexing/vector-index.mdx b/website/docs/cypher/indexing/vector-index.mdx deleted file mode 100644 index 67c931d..0000000 --- a/website/docs/cypher/indexing/vector-index.mdx +++ /dev/null @@ -1,619 +0,0 @@ ---- -title: Vector Index -description: Index and search through vectors in FalkorDB -sidebar_position: 3 -sidebar_label: Vector Index ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# Vector indexing - -With the introduction of the `vector` data-type a new type of index was introduced. 
-A vector index is a dedicated index for indexing and searching through vectors. - -To create this type of index use the following syntax: - -```cypher -CREATE VECTOR INDEX FOR ON OPTIONS -``` - -The options are: -``` -{ - dimension: INT, // Required, length of the vector to be indexed - similarityFunction: STRING, // Required, currently only euclidean or cosine are allowed - M: INT, // Optional, maximum number of outgoing edges per node. default 16 - efConstruction: INT, // Optional, number of candidates during construction. default 200 - efRuntime: INT // Optional, number of candidates during search. default 10 -} -``` - -For example, to create a vector index over all `Product` nodes `description` attribute -use the following syntax: - - - - - - - - - - - - - - - -```python -graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.description) OPTIONS {dimension:128, similarityFunction:'euclidean'}") -``` - - - - -```javascript -await graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.description) OPTIONS {dimension:128, similarityFunction:'euclidean'}"); -``` - - - - -```rust -graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.description) OPTIONS {dimension:128, similarityFunction:'euclidean'}").execute().await?; -``` - - - - -```java -graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.description) OPTIONS {dimension:128, similarityFunction:'euclidean'}"); -``` - - - - -```bash -CREATE VECTOR INDEX FOR (p:Product) ON (p.description) OPTIONS {dimension:128, similarityFunction:'euclidean'} -``` - - - - - -Similarly to create a vector index over all `Call` relationships `summary` attribute -use the following syntax: - - - - - - - - - - - - - - - -```python -graph.query("CREATE VECTOR INDEX FOR ()-[e:Call]->() ON (e.summary) OPTIONS {dimension:128, similarityFunction:'euclidean'}") -``` - - - - -```javascript -await graph.query("CREATE VECTOR INDEX FOR ()-[e:Call]->() ON (e.summary) OPTIONS {dimension:128, similarityFunction:'euclidean'}"); -``` - - - - -```rust -graph.query("CREATE VECTOR INDEX FOR ()-[e:Call]->() ON (e.summary) OPTIONS {dimension:128, similarityFunction:'euclidean'}").execute().await?; -``` - - - - -```java -graph.query("CREATE VECTOR INDEX FOR ()-[e:Call]->() ON (e.summary) OPTIONS {dimension:128, similarityFunction:'euclidean'}"); -``` - - - - -```bash -CREATE VECTOR INDEX FOR ()-[e:Call]->() ON (e.summary) OPTIONS {dimension:128, similarityFunction:'euclidean'} -``` - - - - - -**Important**: When creating a vector index, both the vector dimension and similarity function must be provided. Currently, the only supported similarity functions are 'euclidean' or 'cosine'. - -## Understanding Vector Index Parameters - -### Required Parameters - -- **dimension**: The length of the vectors to be indexed. Must match the dimensionality of your embeddings (e.g., 128, 384, 768, 1536). -- **similarityFunction**: The distance metric used for similarity search: - - `euclidean`: Euclidean distance (L2 norm). Best for embeddings where magnitude matters. - - `cosine`: Cosine similarity. Best for normalized embeddings where direction matters more than magnitude. 
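As an illustrative sketch (the `Doc` label and `embedding` attribute are hypothetical), an index declared with `dimension: 3` and `similarityFunction: 'cosine'` expects every indexed vector to contain exactly three elements:

```cypher
// Illustrative 3-dimensional index using cosine similarity
CREATE VECTOR INDEX FOR (d:Doc) ON (d.embedding) OPTIONS {dimension: 3, similarityFunction: 'cosine'}

// Each indexed vector must have exactly 3 elements to match the declared dimension
CREATE (:Doc {embedding: vecf32([0.12, 0.87, 0.45])})
```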
- -### Optional Parameters - -These parameters control the HNSW (Hierarchical Navigable Small World) index structure: - -- **M** (default: 16): Maximum number of connections per node in the graph - - Higher values improve recall but increase memory usage and build time - - Recommended range: 12-48 - - Use 16-32 for most applications - -- **efConstruction** (default: 200): Number of candidates evaluated during index construction - - Higher values improve index quality but slow down indexing - - Recommended range: 100-400 - - Use 200-300 for balanced quality/speed - -- **efRuntime** (default: 10): Number of candidates evaluated during search - - Higher values improve recall but slow down queries - - Can be adjusted per-query for speed/accuracy tradeoffs - - Recommended: Start with 10, increase if recall is insufficient - -## Inserting vectors - -To create a new vector use the [vecf32](/cypher/functions#vector-functions) function -as follows: - - - - - - - - - - - - - - - -```python -graph.query("CREATE (p: Product {description: vecf32([2.1, 0.82, 1.3])})") -``` - - - - -```javascript -await graph.query("CREATE (p: Product {description: vecf32([2.1, 0.82, 1.3])})"); -``` - - - - -```rust -graph.query("CREATE (p: Product {description: vecf32([2.1, 0.82, 1.3])})").execute().await?; -``` - - - - -```java -graph.query("CREATE (p: Product {description: vecf32([2.1, 0.82, 1.3])})"); -``` - - - - -```bash -CREATE (p: Product {description: vecf32([2.1, 0.82, 1.3])}) -``` - - - - - -The above query creates a new `Product` node with a `description` attribute containing a vector. - -## Query vector index - -Vector indices are used to search for similar vectors to a given query vector -using the similarity function as a measure of "distance". - -To query the index use either `db.idx.vector.queryNodes` for node retrieval or -`db.idx.vector.queryRelationships` for relationships. - -```cypher -CALL db.idx.vector.queryNodes( - label: STRING, - attribute: STRING, - k: INTEGER, - query: VECTOR -) YIELD node, score -``` - -```cypher -CALL db.idx.vector.queryRelationships( - relationshipType: STRING, - attribute: STRING, - k: INTEGER, - query: VECTOR -) YIELD relationship, score -``` - -To query up to 10 similar `Product` descriptions to a given query description vector -issue the following procedure call: - - - - - - - - - - - - - - - -```python -result = graph.query("CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32()) YIELD node") -``` - - - - -```javascript -const result = await graph.query("CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32()) YIELD node"); -``` - - - - -```rust -let result = graph.query("CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32()) YIELD node").execute().await?; -``` - - - - -```java -ResultSet result = graph.query("CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32()) YIELD node"); -``` - - - - -```bash -CALL db.idx.vector.queryNodes( - 'Product', - 'description', - 10, - vecf32(), - ) YIELD node -``` - - - - - -The procedure can yield both the indexed entity assigned to the found similar vector -in addition to a similarity score of that entity. 
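For example, assuming an index on the `Product` `description` attribute whose dimension matches the 3-element vector created above, the following call returns the two most similar products together with their similarity scores:

```cypher
CALL db.idx.vector.queryNodes('Product', 'description', 2, vecf32([2.0, 0.8, 1.2]))
YIELD node, score
RETURN node, score
```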
- -## Deleting a vector index - -To remove a vector index, simply issue the `drop index` command as follows: - -```cypher -DROP VECTOR INDEX FOR () -``` - -For example, to drop the vector index over Product description, invoke: - - - - - - - - - - - - - - - -```python -graph.query("DROP VECTOR INDEX FOR (p:Product) ON (p.description)") -``` - - - - -```javascript -await graph.query("DROP VECTOR INDEX FOR (p:Product) ON (p.description)"); -``` - - - - -```rust -graph.query("DROP VECTOR INDEX FOR (p:Product) ON (p.description)").execute().await?; -``` - - - - -```java -graph.query("DROP VECTOR INDEX FOR (p:Product) ON (p.description)"); -``` - - - - -```bash -DROP VECTOR INDEX FOR (p:Product) ON (p.description) -``` - - - - - -## Index Management - -### Listing Vector Indexes - -To view all indexes (including vector) in your graph, use: - -```cypher -CALL db.indexes() -``` - -Vector indexes are marked with type `VECTOR` and show the dimension and similarity function in the options field. - -## Verifying Vector Index Usage - -To verify that a vector index is being used, examine the query execution plan: - - - - - - - - - - - - - - - -```python -# Query using vector index -query_vector = [2.1, 0.82, 1.3] -result = graph.explain(f"CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32({query_vector})) YIELD node RETURN node") -print(result) -# Output shows: ProcedureCall | db.idx.vector.queryNodes -``` - - - - -```javascript -// Query using vector index -const queryVector = [2.1, 0.82, 1.3]; -const result = await graph.explain(`CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32([${queryVector}])) YIELD node RETURN node`); -console.log(result); -// Output shows: ProcedureCall | db.idx.vector.queryNodes -``` - - - - -```rust -// Query using vector index -let result = graph.explain("CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32([2.1, 0.82, 1.3])) YIELD node RETURN node").execute().await?; -println!("{}", result); -// Output shows: ProcedureCall | db.idx.vector.queryNodes -``` - - - - -```java -// Query using vector index -float[] queryVector = {2.1f, 0.82f, 1.3f}; -String result = graph.explain("CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32([2.1, 0.82, 1.3])) YIELD node RETURN node"); -System.out.println(result); -// Output shows: ProcedureCall | db.idx.vector.queryNodes -``` - - - - -```bash -# Query using vector index -GRAPH.EXPLAIN DEMO_GRAPH "CALL db.idx.vector.queryNodes('Product', 'description', 10, vecf32([2.1, 0.82, 1.3])) YIELD node RETURN node" -# Output shows: ProcedureCall | db.idx.vector.queryNodes -``` - - - - - -## Performance Tradeoffs and Best Practices - -### When to Use Vector Indexes - -Vector indexes are essential for: -- **Semantic search**: Finding similar items based on meaning, not just keywords -- **Recommendation systems**: Discovering similar products, content, or users -- **RAG (Retrieval Augmented Generation)**: Retrieving relevant context for LLMs -- **Duplicate detection**: Finding near-duplicate items based on embeddings -- **Image/audio similarity**: When using vision or audio embedding models - -### Performance Considerations - -**Benefits:** -- Enables efficient approximate nearest neighbor (ANN) search -- Scales to millions of vectors with sub-linear query time -- Supports both node and relationship vectors - -**Costs:** -- **Memory usage**: Vector indexes are memory-intensive - - A 1M vector index with 768 dimensions (float32) requires ~3GB of memory - - Formula: `vectors Γ— dimensions Γ— 4 
bytes + HNSW overhead (~20%)` -- **Build time**: Index construction can be slow for large datasets -- **Approximate results**: Returns approximate (not exact) nearest neighbors -- **No support for filtering**: Vector queries don't combine well with property filters - -**Recommendations:** -- Choose appropriate vector dimensions (balance between quality and cost) -- Use cosine similarity for normalized embeddings (e.g., from OpenAI, Sentence Transformers) -- Use euclidean distance for unnormalized data -- Tune M and efConstruction based on your accuracy requirements -- Consider batch indexing for large datasets -- Monitor memory usage carefully - -### Similarity Function Tradeoffs - -**Cosine Similarity:** -- Best for: Text embeddings, normalized vectors -- Measures: Angular distance between vectors -- Range: -1 to 1 (1 = identical direction) -- Use when: Vector magnitude is not meaningful - -**Euclidean Distance:** -- Best for: Unnormalized data, physical measurements -- Measures: Straight-line distance between vectors -- Range: 0 to ∞ (0 = identical) -- Use when: Both direction and magnitude matter - -### Example: Realistic Vector Search - - - - - - - - - - - - - - - -```python -# Create vector index for product embeddings -graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.embedding) OPTIONS {dimension:768, similarityFunction:'cosine', M:32, efConstruction:200}") - -# Insert products with embeddings (embeddings would come from your model) -embedding = model.encode("laptop computer") # Your embedding model -graph.query(f"CREATE (p:Product {name: 'Laptop', embedding: vecf32({embedding.tolist()})})") - -# Search for similar products -query_embedding = model.encode("notebook pc") -result = graph.query(f"CALL db.idx.vector.queryNodes('Product', 'embedding', 5, vecf32({query_embedding.tolist()})) YIELD node, score RETURN node.name, score ORDER BY score DESC") -for record in result.result_set: - print(f"Product: {record[0]}, Similarity: {record[1]}") -``` - - - - -```javascript -// Create vector index for product embeddings -await graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.embedding) OPTIONS {dimension:768, similarityFunction:'cosine', M:32, efConstruction:200}"); - -// Insert products with embeddings (embeddings would come from your model) -const embedding = await model.encode("laptop computer"); // Your embedding model -await graph.query(`CREATE (p:Product {name: 'Laptop', embedding: vecf32([${embedding}])})`); - -// Search for similar products -const queryEmbedding = await model.encode("notebook pc"); -const result = await graph.query(`CALL db.idx.vector.queryNodes('Product', 'embedding', 5, vecf32([${queryEmbedding}])) YIELD node, score RETURN node.name, score ORDER BY score DESC`); -for (const record of result.data) { - console.log(`Product: ${record['node.name']}, Similarity: ${record['score']}`); -} -``` - - - - -```rust -// Create vector index for product embeddings -graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.embedding) OPTIONS {dimension:768, similarityFunction:'cosine', M:32, efConstruction:200}").execute().await?; - -// Insert products with embeddings (embeddings would come from your model) -let embedding = model.encode("laptop computer"); // Your embedding model -graph.query(&format!("CREATE (p:Product {name: 'Laptop', embedding: vecf32({:?})})", embedding)).execute().await?; - -// Search for similar products -let query_embedding = model.encode("notebook pc"); -let result = graph.query(&format!("CALL db.idx.vector.queryNodes('Product', 'embedding', 5, 
vecf32({:?})) YIELD node, score RETURN node.name, score ORDER BY score DESC", query_embedding)).execute().await?; -for record in result.data() { - println!("Product: {}, Similarity: {}", record["node.name"], record["score"]); -} -``` - - - - -```java -// Create vector index for product embeddings -graph.query("CREATE VECTOR INDEX FOR (p:Product) ON (p.embedding) OPTIONS {dimension:768, similarityFunction:'cosine', M:32, efConstruction:200}"); - -// Insert products with embeddings (embeddings would come from your model) -float[] embedding = model.encode("laptop computer"); // Your embedding model -graph.query(String.format("CREATE (p:Product {name: 'Laptop', embedding: vecf32(%s)})", Arrays.toString(embedding))); - -// Search for similar products -float[] queryEmbedding = model.encode("notebook pc"); -ResultSet result = graph.query(String.format("CALL db.idx.vector.queryNodes('Product', 'embedding', 5, vecf32(%s)) YIELD node, score RETURN node.name, score ORDER BY score DESC", Arrays.toString(queryEmbedding))); -for (Record record : result) { - System.out.printf("Product: %s, Similarity: %s%n", record.get("node.name"), record.get("score")); -} -``` - - - - -```bash -# Create vector index for product embeddings -GRAPH.QUERY DEMO_GRAPH "CREATE VECTOR INDEX FOR (p:Product) ON (p.embedding) OPTIONS {dimension:768, similarityFunction:'cosine', M:32, efConstruction:200}" - -# Insert products with embeddings (embeddings would come from your model) -GRAPH.QUERY DEMO_GRAPH "CREATE (p:Product {name: 'Laptop', embedding: vecf32([0.1, 0.2, ...])})" - -# Search for similar products -GRAPH.QUERY DEMO_GRAPH "CALL db.idx.vector.queryNodes('Product', 'embedding', 5, vecf32([0.15, 0.18, ...])) YIELD node, score RETURN node.name, score ORDER BY score DESC" -``` - - - - - -### Troubleshooting - -**Common Issues:** - -1. **Dimension mismatch**: Ensure all vectors have the same dimension as specified in the index -2. **Wrong similarity function**: Use cosine for normalized vectors, euclidean for unnormalized -3. **Poor recall**: Increase efRuntime or efConstruction parameters -4. **Slow queries**: Decrease efRuntime or reduce k (number of results) -5. **High memory usage**: Reduce M parameter or use lower-dimensional embeddings diff --git a/website/docs/cypher/known-limitations.md b/website/docs/cypher/known-limitations.md deleted file mode 100644 index 0f94a1d..0000000 --- a/website/docs/cypher/known-limitations.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Known limitations -description: FalkorDB Known limitations -sidebar_position: 1000 -sidebar_label: Known limitations ---- - - - -# Known limitations -## Relationship uniqueness in patterns - -When a relation in a match pattern is not referenced elsewhere in the query, FalkorDB will only verify that at least one matching relation exists (rather than operating on every matching relation). - -In some queries, this will cause unexpected behaviors. Consider a graph with 2 nodes and 2 relations between them: - -``` -CREATE (a)-[:e {val: '1'}]->(b), (a)-[:e {val: '2'}]->(b) -``` - -Counting the number of explicit edges returns 2, as expected. - -``` -MATCH (a)-[e]->(b) RETURN COUNT(e) -``` - -However, if we count the nodes in this pattern without explicitly referencing the relation, we receive a value of 1. - -``` -MATCH (a)-[e]->(b) RETURN COUNT(b) -``` - -We are researching designs that resolve this problem without negatively impacting performance. 
As a temporary workaround, queries that must operate on every relation matching a pattern should explicitly refer to that relation's alias elsewhere in the query. Two options for this are: - -``` -MATCH (a)-[e]->(b) WHERE ID(e) >= 0 RETURN COUNT(b) -MATCH (a)-[e]->(b) RETURN COUNT(b), e.dummyval -``` - -## LIMIT clause does not affect eager operations - -When a WITH or RETURN clause introduces a LIMIT value, this value ought to be respected by all preceding operations. - -For example, given the query: - -``` -UNWIND [1,2,3] AS value CREATE (a {property: value}) RETURN a LIMIT 1 -``` - -One node should be created with its 'property' set to 1. FalkorDB will currently create three nodes, and only return the first. - -This limitation affects all eager operations: CREATE, SET, DELETE, MERGE, and projections with aggregate functions. - -## Indexing limitations - -One way in which FalkorDB will optimize queries is by introducing index scans when a filter is specified on an indexed label-property pair. - -The current index implementation, however, does not handle not-equal (`<>`) filters. - -To profile a query and see whether index optimizations have been introduced, use the `GRAPH.EXPLAIN` endpoint: - -```sh -$ redis-cli GRAPH.EXPLAIN social "MATCH (p:person) WHERE p.id < 5 RETURN p" -1) "Results" -2) " Project" -3) " Index Scan | (p:person)" -``` diff --git a/website/docs/cypher/limit.md b/website/docs/cypher/limit.md deleted file mode 100644 index 6e6e64c..0000000 --- a/website/docs/cypher/limit.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: LIMIT -description: > -sidebar_position: 7 -sidebar_label: LIMIT ---- - - - -# LIMIT -Although not mandatory, you can use the limit clause -to limit the number of records returned by a query: - -```sql -LIMIT -``` - -If not specified, there's no limit to the number of records returned by a query. \ No newline at end of file diff --git a/website/docs/cypher/load-csv.md b/website/docs/cypher/load-csv.md deleted file mode 100644 index e7c2c7e..0000000 --- a/website/docs/cypher/load-csv.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: LOAD CSV -description: > -sidebar_position: 17 -sidebar_label: LOAD CSV ---- - - - -# LOAD CSV -```cypher -LOAD CSV FROM 'file://actors.csv' AS row -MERGE (a:Actor {name: row[0]}) -``` - -`LOAD CSV FROM` accepts a string path to a CSV file. The file is parsed line by line, and the current line is accessible through the variable specified by AS. Each parsed value is treated as a `string`. Use appropriate conversion functions, for example, `toInteger`, to cast values to their correct types. -Additional clauses can follow and access the row variable. - -Additional clauses can follow and accesses the `row` variable - -## FIELD DELIMITER - -If not specified, ',' is used as the default field delimiter. To change the delimiter, use the following: - -```cypher -LOAD CSV FROM 'file://actors.csv' AS row FIELDTERMINATOR ';' -RETURN row -LIMIT 10 -``` - -## IMPORTING DATA - -### Importing local files - -FalkorDB defines a data directory [see configuration](/getting-started/configuration#import_folder) -Under which local CSV files should be stored. All `file://` URIs are resolved -relative to that directory. - -In the following example we'll load the `actors.csv` file into FalkorDB. 
- -### actors.csv - -| | | -| ---------------|-----------| -| Lee Pace | 1979 | -| Vin Diesel | 1967 | -| Chris Pratt | 1979 | -| Zoe Saldana | 1978 | - -```cypher -LOAD CSV FROM 'file://actors.csv' -AS row -MERGE (a:Actor {name: row[0], birth_year: toInteger(row[1])}) -RETURN a.name, a.birth_year -``` - -Note that we've used indices e.g. `row[0]` to access the value at the corresponding -column. - -If the CSV contains a header row, like this: - -### actors.csv - -| name | birthyear | -| :--------------| :---------| -| Lee Pace | 1979 | -| Vin Diesel | 1967 | -| Chris Pratt | 1979 | -| Zoe Saldana | 1978 | - -Use the `WITH HEADERS` variation of the `LOAD CSV` clause: - -```cypher -LOAD CSV WITH HEADERS FROM 'file://actors.csv' -AS row -MERGE (a:Actor {name: row[name], birth_year: toInteger(row[birthyear])}) -RETURN a.name, a.birth_year -``` - -When a header row exists and `WITH HEADERS` is specified, the `row` variable becomes a `map` instead of an `array`. Access individual elements via their column names. - - -### Importing data from multiple CSVs - -Building on the previous example, we’ll introduce a second CSV file, `acted_in.csv`, which connects actors to movies. - - -### acted_in.csv - -| actor | movie | -| :--------------| :--------------| -| Lee Pace | The Fall | -| Vin Diesel | Fast & Furious | -| Chris Pratt | Passengers | -| Zoe Saldana | Avatar | - - -We'll create a new graph connecting actors to the movies they've acted in - -Load actors: - -```cypher -LOAD CSV WITH HEADERS FROM 'file://actors.csv' -AS row -MERGE (a:Actor {name:row['name']}) -``` - -Load movies and create `ACTED_IN` relations: - -```cypher -LOAD CSV WITH HEADERS FROM 'file://acted_in.csv' -AS row - -MATCH (a:Actor {name: row['actor']}) -MERGE (m:Movie {title: row['movie']}) -MERGE (a)-[:ACTED_IN]->(m) -``` - -### Importing remote files - -FalkorDB supports importing remote CSVs via HTTPS. Here’s an example loading the Big Mac dataset from calmcode.io: - -```cypher -LOAD CSV WITH HEADERS FROM 'https://calmcode.io/static/data/bigmac.csv' AS row -RETURN row LIMIT 4 - -1) 1) "ROW" -2) 1) 1) "{date: 2002-04-01, currency_code: PHP, name: Philippines, local_price: 65.0, dollar_ex: 51.0, dollar_price: 1.27450980392157}" - 2) 1) "{date: 2002-04-01, currency_code: PEN, name: Peru, local_price: 8.5, dollar_ex: 3.43, dollar_price: 2.47813411078717}" - 3) 1) "{date: 2002-04-01, currency_code: NZD, name: New Zealand, local_price: 3.6, dollar_ex: 2.24, dollar_price: 1.60714285714286}" - 4) 1) "{date: 2002-04-01, currency_code: NOK, name: Norway, local_price: 35.0, dollar_ex: 8.56, dollar_price: 4.088785046728971}" -``` - -### Dealing with a large number of columns or missing entries - -Loading CSV files with missing entries can cause complications. The following approach handles this and works well for files with many columns. -Assuming we are loading the following CSV file: - - -### missing_entries.csv - -| name | birthyear | -| :--------------| :---------| -| Lee Pace | 1979 | -| Vin Diesel | | -| Chris Pratt | | -| Zoe Saldana | 1978 | - -> Note: Vin Diesel and Chris Pratt are missing their `birth_year` entries. - -When creating Actor nodes, there is no need to explicitly define each column as done previously. -The following query creates an empty Actor node and assigns the current CSV row to it. 
-This process automatically sets the node's attribute set to match the values of the current row: - -```cypher -LOAD CSV FROM 'file://missing_entries.csv' AS row -CREATE (a:Actor) -SET a = row -RETURN a - -1) 1) "a" -2) 1) 1) 1) 1) "id" - 2) (integer) 0 - 2) 1) "labels" - 2) 1) "Actor" - 3) 1) "properties" - 2) 1) 1) "name" - 2) "Zoe Saldana" - 2) 1) "birthyear" - 2) "1978" - 2) 1) 1) 1) "id" - 2) (integer) 1 - 2) 1) "labels" - 2) 1) "Actor" - 3) 1) "properties" - 2) 1) 1) "name" - 2) "Chris Pratt" - 3) 1) 1) 1) "id" - 2) (integer) 2 - 2) 1) "labels" - 2) 1) "Actor" - 3) 1) "properties" - 2) 1) 1) "name" - 2) "Vin Diesel" - 4) 1) 1) 1) "id" - 2) (integer) 3 - 2) 1) "labels" - 2) 1) "Actor" - 3) 1) "properties" - 2) 1) 1) "name" - 2) "Lee Pace" - 2) 1) "birthyear" - 2) "1979" -``` diff --git a/website/docs/cypher/match.md b/website/docs/cypher/match.md deleted file mode 100644 index 76491d7..0000000 --- a/website/docs/cypher/match.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: MATCH -description: > -sidebar_position: 1 -sidebar_label: MATCH ---- - - - -# MATCH -The `MATCH` clause describes the relationship between queried entities using ASCII art to represent pattern(s) to match against. - -**Syntax Overview:** -- Nodes are represented by parentheses `()` -- Relationships are represented by brackets `[]` -- Each graph entity (node/relationship) can contain an alias, a label/relationship type, and filters, but all are optional - -**Entity Structure:** `alias:label {filters}` - -Where: -- `alias` - Optional variable name to reference the entity -- `label` - Optional label for nodes or type for relationships -- `{filters}` - Optional property filters - -Example: - -```sh -(a:Actor)-[:ACT]->(m:Movie {title:"straight outta compton"}) -``` - -`a` is an alias for the source node, which we'll be able to refer to at different places within our query. - -`Actor` is the label under which this node is marked. - -`ACT` is the relationship type. - -`m` is an alias for the destination node. - -`Movie` destination node is of "type" movie. - -`{title:"straight outta compton"}` filters for nodes where the title property equals "straight outta compton". - -In this example, we're querying for actor entities that have an "ACT" relationship with the movie entity "straight outta compton". - -It is possible to describe broader relationships by composing a multi-hop query such as: - -```sh -(me {name:'swilly'})-[:FRIENDS_WITH]->()-[:FRIENDS_WITH]->(foaf) -``` - -Here we're interested in finding out who my friends' friends are. - -Nodes can have more than one relationship coming in or out of them, for instance: - -```sh -(me {name:'swilly'})-[:VISITED]->(c:Country)<-[:VISITED]-(friend)<-[:FRIENDS_WITH]-(me) -``` - -Here we're interested in knowing which of my friends have visited at least one country I've been to. - -## Variable length relationships - -Nodes that are a variable number of relationshipβ†’node hops away can be found using the following syntax: - -```sh --[:TYPE*minHops..maxHops]-> -``` - -`TYPE`, `minHops` and `maxHops` are all optional and default to type agnostic, 1 and infinity, respectively. - -When no bounds are given the dots may be omitted. The dots may also be omitted when setting only one bound and this implies a fixed length pattern. - -Example: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (charlie:Actor { name: 'Charlie Sheen' })-[:PLAYED_WITH*1..3]->(colleague:Actor) -RETURN colleague" -``` - -Returns all actors related to 'Charlie Sheen' by 1 to 3 hops. 
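As noted above, a single bound with the dots omitted implies a fixed-length pattern. For example, the following query returns only colleagues exactly two hops away from 'Charlie Sheen':

```sh
GRAPH.QUERY DEMO_GRAPH
"MATCH (charlie:Actor { name: 'Charlie Sheen' })-[:PLAYED_WITH*2]->(colleague:Actor)
RETURN colleague"
```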
- -## Bidirectional path traversal - -If a relationship pattern does not specify a direction, it will match regardless of which node is the source and which is the destination: - -```sh --[:TYPE]- -``` - -Example: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (person_a:Person)-[:KNOWS]-(person_b:Person) -RETURN person_a, person_b" -``` - -Returns all pairs of people connected by a `KNOWS` relationship. Note that each pair will be returned twice, once with each node in the `person_a` field and once in the `person_b` field. - -The syntactic sugar `(person_a)<-[:KNOWS]->(person_b)` will return the same results. - -The bracketed edge description can be omitted if all relations should be considered: `(person_a)--(person_b)`. - -## Named paths - -Named path variables are created by assigning a path in a MATCH clause to a single alias with the syntax: -`MATCH named_path = (path)-[to]->(capture)` - -The named path includes all entities in the path, regardless of whether they have been explicitly aliased. Named paths can be accessed using built-in functions such as `nodes()` or returned directly if using a language-specific client. - -Example: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH p=(charlie:Actor { name: 'Charlie Sheen' })-[:PLAYED_WITH*1..3]->(:Actor) -RETURN nodes(p) as actors" -``` - -This query will produce all the paths matching the pattern contained in the named path `p`. All of these paths will share the same starting point, the actor node representing Charlie Sheen, but will otherwise vary in length and contents. Though the variable-length traversal and `(:Actor)` endpoint are not explicitly aliased, all nodes and edges traversed along the path will be included in `p`. In this case, we are only interested in the nodes of each path, which we'll collect using the built-in function `nodes()`. The returned value will contain, in order, Charlie Sheen, between 0 and 2 intermediate nodes, and the unaliased endpoint. diff --git a/website/docs/cypher/merge.md b/website/docs/cypher/merge.md deleted file mode 100644 index 441bf41..0000000 --- a/website/docs/cypher/merge.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: MERGE -description: > -sidebar_position: 11 -sidebar_label: MERGE ---- - - - -# MERGE -The MERGE clause ensures that a path exists in the graph (either the path already exists, or it needs to be created). - -MERGE either matches existing nodes and binds them, or it creates new data and binds that. - -It’s like a combination of MATCH and CREATE that also allows you to specify what happens if the data was matched or created. - -For example, you can specify that the graph must contain a node for a user with a certain name. - -If there isn’t a node with the correct name, a new node will be created and its name property set. - -Any aliases in the MERGE path that were introduced by earlier clauses can only be matched; MERGE will not create them. - -When the MERGE path doesn't rely on earlier clauses, the whole path will always either be matched or created. - -If all path elements are introduced by MERGE, a match failure will cause all elements to be created, even if part of the match succeeded. - -The MERGE path can be followed by ON MATCH SET and ON CREATE SET directives to conditionally set properties depending on whether or not the match succeeded. 
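A minimal sketch of these directives (the `visits` counter is illustrative): the property is initialized when the node is first created and incremented on every subsequent match:

```sh
GRAPH.QUERY DEMO_GRAPH
"MERGE (p:Person { name: 'Charlie Sheen' })
ON CREATE SET p.visits = 1
ON MATCH SET p.visits = p.visits + 1"
```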
- -## Merging nodes - -To merge a single node with a label: - -```sh -GRAPH.QUERY DEMO_GRAPH "MERGE (robert:Critic)" -``` - -To merge a single node with properties: - -```sh -GRAPH.QUERY DEMO_GRAPH "MERGE (charlie { name: 'Charlie Sheen', age: 10 })" -``` - -To merge a single node, specifying both label and property: - -```sh -GRAPH.QUERY DEMO_GRAPH "MERGE (michael:Person { name: 'Michael Douglas' })" -``` - -## Merging paths - -Because MERGE either matches or creates a full path, it is easy to accidentally create duplicate nodes. - -For example, if we run the following query on our sample graph: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MERGE (charlie { name: 'Charlie Sheen '})-[r:ACTED_IN]->(wallStreet:Movie { name: 'Wall Street' })" -``` - -Even though a node with the name 'Charlie Sheen' already exists, the full pattern does not match, so 1 relation and 2 nodes - including a duplicate 'Charlie Sheen' node - will be created. - -We should use multiple MERGE clauses to merge a relation and only create non-existent endpoints: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MERGE (charlie { name: 'Charlie Sheen' }) - MERGE (wallStreet:Movie { name: 'Wall Street' }) - MERGE (charlie)-[r:ACTED_IN]->(wallStreet)" -``` - -If we don't want to create anything if pattern elements don't exist, we can combine MATCH and MERGE clauses. The following query merges a relation only if both of its endpoints already exist: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (charlie { name: 'Charlie Sheen' }) - MATCH (wallStreet:Movie { name: 'Wall Street' }) - MERGE (charlie)-[r:ACTED_IN]->(wallStreet)" -``` - -## On Match and On Create directives - -Using ON MATCH and ON CREATE, MERGE can set properties differently depending on whether a pattern is matched or created. - -In this query, we'll merge paths based on a list of properties and conditionally set a property when creating new entities: - -```sh -GRAPH.QUERY DEMO_GRAPH -"UNWIND ['Charlie Sheen', 'Michael Douglas', 'Tamara Tunie'] AS actor_name - MATCH (movie:Movie { name: 'Wall Street' }) - MERGE (person {name: actor_name})-[:ACTED_IN]->(movie) - ON CREATE SET person.first_role = movie.name" -``` diff --git a/website/docs/cypher/optional-match.md b/website/docs/cypher/optional-match.md deleted file mode 100644 index 4ca0925..0000000 --- a/website/docs/cypher/optional-match.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: OPTIONAL MATCH -description: > -sidebar_position: 2 -sidebar_label: OPTIONAL MATCH ---- - - - -# OPTIONAL MATCH -The OPTIONAL MATCH clause is a MATCH variant that produces null values for elements that do not match successfully, rather than the all-or-nothing logic for patterns in MATCH clauses. - -It can be considered to fill the same role as LEFT/RIGHT JOIN does in SQL, as MATCH entities must be resolved but nodes and edges introduced in OPTIONAL MATCH will be returned as nulls if they cannot be found. - -OPTIONAL MATCH clauses accept the same patterns as standard MATCH clauses, and may similarly be modified by WHERE clauses. - -Multiple MATCH and OPTIONAL MATCH clauses can be chained together, though a mandatory MATCH cannot follow an optional one. - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (p:Person) OPTIONAL MATCH (p)-[w:WORKS_AT]->(c:Company) -WHERE w.start_date > 2016 -RETURN p, w, c" -``` - -All `Person` nodes are returned, as well as any `WORKS_AT` relations and `Company` nodes that can be resolved and satisfy the `start_date` constraint. 
For each `Person` that does not resolve the optional pattern, the person will be returned as normal and the non-matching elements will be returned as null. - -Cypher is lenient in its handling of null values, so actions like property accesses and function calls on null values will return null values rather than emit errors. - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (p:Person) OPTIONAL MATCH (p)-[w:WORKS_AT]->(c:Company) -RETURN p, w.department, ID(c) as ID" -``` - -In this case, `w.department` and `ID` will be returned if the OPTIONAL MATCH was successful, and will be null otherwise. - -Clauses like SET, CREATE, MERGE, and DELETE will ignore null inputs and perform the expected updates on real inputs. One exception to this is that attempting to create a relation with a null endpoint will cause an error: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (p:Person) OPTIONAL MATCH (p)-[w:WORKS_AT]->(c:Company) -CREATE (c)-[:NEW_RELATION]->(:NEW_NODE)" -``` - -If `c` is null for any record, this query will emit an error. In this case, no changes to the graph are committed, even if some values for `c` were resolved. diff --git a/website/docs/cypher/order-by.md b/website/docs/cypher/order-by.md deleted file mode 100644 index 8d23f8d..0000000 --- a/website/docs/cypher/order-by.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: ORDER BY -description: > -sidebar_position: 5 -sidebar_label: ORDER BY ---- - - - -# ORDER BY -Order by specifies that the output be sorted and how. - -You can order by multiple properties by stating each variable in the ORDER BY clause. - -Each property may specify its sort order with `ASC`/`ASCENDING` or `DESC`/`DESCENDING`. If no order is specified, it defaults to ascending. - -The result will be sorted by the first variable listed. - -For equal values, it will go to the next property in the ORDER BY clause, and so on. - -```sh -ORDER BY -``` - -Below we sort our friends by height. For equal heights, weight is used to break ties. - -```sh -ORDER BY friend.height, friend.weight DESC -``` \ No newline at end of file diff --git a/website/docs/cypher/procedures.md b/website/docs/cypher/procedures.md deleted file mode 100644 index 474b89d..0000000 --- a/website/docs/cypher/procedures.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Procedures -description: > -sidebar_position: 19 -sidebar_label: Procedures ---- - - - -# Procedures -Procedures are functions that can be called from within Cypher queries using the `CALL` syntax. - -## Syntax - -Basic procedure call: - -```sh -GRAPH.QUERY social "CALL db.labels()" -``` - -With explicit `YIELD` to select specific return values: - -```sh -GRAPH.QUERY social "CALL db.labels() YIELD label" -``` - -**Note:** The `YIELD` clause is optional. When omitted, all values listed in the 'Yields' column are returned automatically. - -## Available Procedures - -| Procedure | Arguments | Yields | Description | -| ------- | :------- | :------- | :----------- | -| db.labels | none | `label` | Yields all node labels in the graph. | -| db.relationshipTypes | none | `relationshipType` | Yields all relationship types in the graph. | -| db.propertyKeys | none | `propertyKey` | Yields all property keys in the graph. | -| db.meta.stats | none | `labels`, `relTypes`, `relCount`, `nodeCount`, `labelCount`, `relTypeCount`, `propertyKeyCount` | Yield comprehensive graph statistics including maps of labels and relationship types with their counts, total node/relationship counts, and schema metadata counts. 
| -| db.indexes | none | `label`, `properties`, `types`, `options`, `language`, `stopwords`, `entitytype`, `status`, `info` | Yield all indexes in the graph, denoting whether they are of the type of exact-match ("RANGE"), full-text ("FULLTEXT") or vector ("VECTOR") and which label and properties each covers and whether they are indexing node or relationship attributes. | -| db.constraints | none | `type`, `label`, `properties`, `entitytype`, `status` | Yield all constraints in the graph, denoting constraint type (UNIQIE/MANDATORY), which label/relationship-type and properties each enforces. | -| db.idx.fulltext.createNodeIndex | `label`, `property` [, `property` ...] | none | Builds a full-text searchable index on a label and the 1 or more specified properties. | -| db.idx.fulltext.drop | `label` | none | Deletes the full-text index associated with the given label. | -| db.idx.fulltext.queryNodes | `label`, `string` | `node`, `score` | Retrieve all nodes that contain the specified string in the full-text indexes on the given label. | -| db.idx.fulltext.queryRelationships | `relationshipType`, `string` | `relationship`, `score` | Retrieve all relationships that contain the specified string in the full-text indexes on the given relationship type. See [Full-Text Indexing](/cypher/indexing/fulltext-index) for details. | -| db.idx.vector.queryNodes | `label`, `attribute`, `k`, `query` | `node`, `score` | Retrieve up to k nodes with vectors most similar to the query vector using the specified label and attribute. See [Vector Indexing](/cypher/indexing/vector-index) for details. | -| db.idx.vector.queryRelationships | `relationshipType`, `attribute`, `k`, `query` | `relationship`, `score` | Retrieve up to k relationships with vectors most similar to the query vector using the specified relationship type and attribute. See [Vector Indexing](/cypher/indexing/vector-index) for details. | -| algo.pageRank | `label`, `relationship-type` | `node`, `score` | Runs the pagerank algorithm over nodes of given label, considering only edges of given relationship type. | -| algo.BFS | `source-node`, `max-level`, `relationship-type` | `nodes`, `edges` | Performs BFS to find all nodes connected to the source. A `max level` of 0 indicates unlimited and a non-NULL `relationship-type` defines the relationship type that may be traversed. See [BFS Algorithm](/algorithms/bfs) for details. | -| algo.MSF | `config` | `src`, `dest`, `weight`, `relationshipType` | Computes the Minimum Spanning Forest of the graph. See [MSF Algorithm](/algorithms/msf) for details. | -| dbms.procedures() | none | `name`, `mode` | List all procedures in the DBMS, yields for every procedure its name and mode (read/write). | diff --git a/website/docs/cypher/remove.md b/website/docs/cypher/remove.md deleted file mode 100644 index e7a10a2..0000000 --- a/website/docs/cypher/remove.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: REMOVE -description: > -sidebar_position: 23 -sidebar_label: REMOVE ---- - - - -# REMOVE -## Example graph - -```cypher -CREATE - (billy :Player {name: 'Billy', score: 84}), - (andy :Player {name: 'Andy', score: 21}), - (lori :Player:Admin {name: 'Lori', score: 90}) -``` - -## Remove attributes - -The following query removes the 'score' attribute from the node -representing Andy. 
- -```cypher -MATCH (n {name: 'Andy'}) -REMOVE n.score -RETURN n.name, n.score -``` - -Result: - -|n.name|n.score| -|------|-------| -|"Andy"| Null | - - -## Remove a label from a node - -To remove a label from a node use the REMOVE clause as follows: - -```cypher -MATCH (n {name: 'Lori'}) -REMOVE n:Admin -RETURN n.name, labels(n) -``` - -Result: - -|n.name|labels(n)| -|------|--------| -|"Lori"|[Player]| - - -## Removing multiple labels from a node - -Similar to removing a single label from a node we can use the REMOVE clause -to remove multiple labels in one go - -```cypher -MATCH (n :Player {name: 'Lori'}) -REMOVE n:Admin:Player -RETURN n.name, labels(n) -``` - -Result: - -|n.name|labels(n)| -|------|--------| -|"Lori"|[] | diff --git a/website/docs/cypher/return.md b/website/docs/cypher/return.md deleted file mode 100644 index 7416064..0000000 --- a/website/docs/cypher/return.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: RETURN -description: > -sidebar_position: 4 -sidebar_label: RETURN ---- - - - -# RETURN -The `RETURN` clause defines which properties and values the result-set will contain. - -## Basic Usage - -The basic structure is a comma-separated list of `alias.property` expressions: - -```sh -RETURN person.name, person.age -``` - -For convenience, you can specify just the alias to return all properties of an entity: - -```sh -RETURN movie.title, actor -``` - -## Removing Duplicates - -Use the `DISTINCT` keyword to remove duplicate values from the result-set: - -```sh -RETURN DISTINCT friend_of_friend.name -``` - -For example, if you have two friends (Joe and Miesha) who both know Dominick, `DISTINCT` ensures that Dominick appears only once in the final result set. - - -## Aggregations - -The `RETURN` clause can also aggregate data, similar to SQL's GROUP BY functionality. - -When an aggregation function is used in the RETURN list, all non-aggregated values become implicit grouping keys: - -```sh -RETURN movie.title, MAX(actor.age), MIN(actor.age) -``` - -This query groups data by movie title and, for each movie, returns the youngest and oldest actor ages. - -### Supported Aggregation Functions - -| Function | Description | -|----------|-------------| -| `avg` | Calculate average of numeric values | -| `collect` | Collect values into a list | -| `count` | Count number of values | -| `max` | Find maximum value | -| `min` | Find minimum value | -| `percentileCont` | Calculate continuous percentile | -| `percentileDisc` | Calculate discrete percentile | -| `stDev` | Calculate standard deviation | -| `sum` | Calculate sum of numeric values | - -For detailed information on aggregation functions, see the [Functions documentation](/cypher/functions#aggregating-functions). \ No newline at end of file diff --git a/website/docs/cypher/set.md b/website/docs/cypher/set.md deleted file mode 100644 index b44a0b6..0000000 --- a/website/docs/cypher/set.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: SET -description: > -sidebar_position: 10 -sidebar_label: SET ---- - - - -# SET -The `SET` clause is used to create or update properties on nodes and relationships. 
- -## Setting a Single Property - -To set a property on a node: - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (n { name: 'Jim' }) SET n.name = 'Bob'" -``` - -## Setting Multiple Properties - -You can set multiple properties in a single `SET` clause by separating them with commas: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (n { name: 'Jim', age:32 }) -SET n.age = 33, n.name = 'Bob'" -``` - -## Setting Properties from a Map - -You can set properties using a map. There are two operators with different behaviors: - -### Replace All Properties (`=`) - -Replaces **all** existing properties with the map properties: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (n { name: 'Jim', age:32 }) -SET n = {age: 33, name: 'Bob'}" -``` - -**Result:** The node will have only the properties `age` and `name`. Any other existing properties are removed. - -### Merge Properties (`+=`) - -Updates only the specified properties while keeping other existing properties: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (n { name: 'Jim', age:32 }) -SET n += {age: 33}" -``` - -**Result:** The node's `age` is updated to 33, but `name` and any other properties remain unchanged. - -## Copying Properties Between Entities - -You can copy all properties from one entity to another: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (jim {name: 'Jim'}), (pam {name: 'Pam'}) -SET jim = pam" -``` - -After executing this query, the `jim` node will have exactly the same properties as the `pam` node (all of Jim's original properties are replaced). - -## Removing Properties - -To remove a property, set its value to `NULL`: - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (n { name: 'Jim' }) SET n.name = NULL" -``` - -This removes the `name` property from the node entirely. \ No newline at end of file diff --git a/website/docs/cypher/skip.md b/website/docs/cypher/skip.md deleted file mode 100644 index 5c0ec45..0000000 --- a/website/docs/cypher/skip.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: SKIP -description: > -sidebar_position: 6 -sidebar_label: SKIP ---- - - - -# SKIP -The optional skip clause allows a specified number of records to be omitted from the result set. - -```sh -SKIP -``` - -This can be useful when processing results in batches. A query that would examine the second 100-element batch of nodes with the label `Person`, for example, would be: - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (p:Person) RETURN p ORDER BY p.name SKIP 100 LIMIT 100" -``` \ No newline at end of file diff --git a/website/docs/cypher/union.md b/website/docs/cypher/union.md deleted file mode 100644 index 3d95a87..0000000 --- a/website/docs/cypher/union.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: UNION -description: > -sidebar_position: 13 -sidebar_label: UNION ---- - - - -# UNION -The UNION clause is used to combine the result of multiple queries. - -UNION combines the results of two or more queries into a single result set that includes all the rows that belong to all queries in the union. - -The number and the names of the columns must be identical in all queries combined by using UNION. - -To keep all the result rows, use UNION ALL. - -Using just UNION will combine and remove duplicates from the result set. 
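For example, the query below uses plain UNION, so each name appears at most once in the result; the UNION ALL variant that follows keeps every row, including duplicates:

```sh
GRAPH.QUERY DEMO_GRAPH
"MATCH (n:Actor) RETURN n.name AS name
UNION
MATCH (n:Movie) RETURN n.title AS name"
```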
- -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (n:Actor) RETURN n.name AS name -UNION ALL -MATCH (n:Movie) RETURN n.title AS name" -``` diff --git a/website/docs/cypher/unwind.md b/website/docs/cypher/unwind.md deleted file mode 100644 index fb6b274..0000000 --- a/website/docs/cypher/unwind.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: UNWIND -description: > -sidebar_position: 14 -sidebar_label: UNWIND ---- - - - -# UNWIND -The `UNWIND` clause transforms a list into individual rows, creating one row for each element in the list. - -## Behavior - -- Each element in the list becomes a separate row -- The order of rows preserves the original list order -- Useful for processing lists, creating multiple entities, or parameter expansion - -## Basic Example - -Create a node with an array property: - -```sh -GRAPH.QUERY DEMO_GRAPH "CREATE (p {array:[1,2,3]})" -``` - -Unwind the array into individual rows: - -```sh -GRAPH.QUERY DEMO_GRAPH "MATCH (p) UNWIND p.array AS y RETURN y" -``` - -**Result:** -``` -y -1 -2 -3 -``` - -## Practical Examples - -### Create Multiple Nodes from a List - -```sh -GRAPH.QUERY DEMO_GRAPH -"UNWIND ['Alice', 'Bob', 'Charlie'] AS name -CREATE (:Person {name: name})" -``` - -### Process Nested Data - -```sh -GRAPH.QUERY DEMO_GRAPH -"WITH [{name: 'Alice', age: 30}, {name: 'Bob', age: 25}] AS people -UNWIND people AS person -CREATE (:Person {name: person.name, age: person.age})" -``` - -### Combine with Other Clauses - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (p:Person) -WITH collect(p.name) AS names -UNWIND names AS name -RETURN name ORDER BY name" -``` \ No newline at end of file diff --git a/website/docs/cypher/where.md b/website/docs/cypher/where.md deleted file mode 100644 index b6c82e0..0000000 --- a/website/docs/cypher/where.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: WHERE -description: > -sidebar_position: 3 -sidebar_label: WHERE ---- - - - -# WHERE -The `WHERE` clause is optional and is used to filter results based on predicates (conditions). - -## Supported Comparison Operators - -| Operator | Description | -|----------|-------------| -| `=` | Equal to | -| `<>` | Not equal to | -| `<` | Less than | -| `<=` | Less than or equal to | -| `>` | Greater than | -| `>=` | Greater than or equal to | -| `CONTAINS` | String contains substring | -| `ENDS WITH` | String ends with substring | -| `IN` | Value is in list | -| `STARTS WITH` | String starts with substring | - -## Combining Predicates - -Predicates can be combined using the logical operators `AND`, `OR`, and `NOT`. - -Use parentheses to control precedence when combining multiple predicates. - -### Examples: - -```sql -WHERE (actor.name = "john doe" OR movie.rating > 8.8) AND movie.votes <= 250) -``` - -```sql -WHERE actor.age >= director.age AND actor.age > 32 -``` - -## Inline Property Filters - -You can specify equality predicates directly within node patterns using curly braces: - -```sql -(:President {name:"Jed Bartlett"})-[:WON]->(:State) -``` - -This requires that the president node's `name` property equals "Jed Bartlett". - -Inline predicates are functionally equivalent to predicates specified in the WHERE clause. - -## Pattern Predicates - -You can also filter based on graph patterns. These two queries are equivalent and both return presidents and the states they won: - -```sh -MATCH (p:President), (s:State) WHERE (p)-[:WON]->(s) RETURN p, s -``` - -```sh -MATCH (p:President)-[:WON]->(s:State) RETURN p, s -``` - -Pattern predicates can be negated and combined with logical operators. 
This query returns presidents who did not win in states where they were governors: - -```sh -MATCH (p:President), (s:State) WHERE NOT (p)-[:WON]->(s) AND (p)-[:GOVERNOR]->(s) RETURN p, s -``` - -## Label Filtering - -Nodes can be filtered by label in the WHERE clause: - -```sh -MATCH (n)-[:R]->() WHERE n:L1 OR n:L2 RETURN n -``` - -**Best Practice:** When possible, specify labels directly in the node pattern of the MATCH clause for better performance. diff --git a/website/docs/cypher/with.md b/website/docs/cypher/with.md deleted file mode 100644 index 11feff6..0000000 --- a/website/docs/cypher/with.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: WITH -description: > -sidebar_position: 12 -sidebar_label: WITH ---- - - - -# WITH -The `WITH` clause allows you to chain query parts together, passing results from one part to the next. This enables complex query composition and data manipulations. - -## Use Cases - -`WITH` is useful for: -- Chaining multiple query parts together -- Performing intermediate aggregations -- Filtering or transforming results before the next query part -- Using query modifiers (`DISTINCT`, `ORDER BY`, `LIMIT`, `SKIP`) mid-query - -## Example: Filtering by Aggregated Values - -Find all children above the average age of all people: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (p:Person) WITH AVG(p.age) AS average_age MATCH (:Person)-[:PARENT_OF]->(child:Person) WHERE child.age > average_age return child -``` - -## Example: Using Modifiers Mid-Query - -You can use query modifiers with `WITH` to filter or sort before continuing: - -```sh -GRAPH.QUERY DEMO_GRAPH -"MATCH (u:User) WITH u AS nonrecent ORDER BY u.lastVisit LIMIT 3 SET nonrecent.should_contact = true" -``` - -This query: -1. Matches all users -2. Orders them by last visit (oldest first) -3. Limits to the 3 least recent visitors -4. Sets a flag on those users - -## Key Points - -- `WITH` acts like a pipeline between query parts -- Variables not included in `WITH` are not available in subsequent parts -- You can rename variables using `AS` in the `WITH` clause -- Aggregations in `WITH` cause implicit grouping (like `RETURN`) \ No newline at end of file diff --git a/website/docs/datatypes.md b/website/docs/datatypes.md deleted file mode 100644 index f880111..0000000 --- a/website/docs/datatypes.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -title: Data types -description: FalkorDB supports a number of distinct data types, some of which can be persisted as property values and some of which are ephemeral. -sidebar_position: 5 -sidebar_label: Data types ---- - - -# Graph types - -All graph types are either structural elements of the graph or projections thereof. None can be stored as a property value. - -## Nodes - -Nodes are persistent graph elements that can be connected to each other via relationships. - -They can have any number of labels that describe their general type. For example, a node representing London may be created with the `Place` and `City` labels and retrieved by queries using either or both of them. - -Nodes have sets of properties to describe all of their salient characteristics. For example, our London node may have the property set: `{name: 'London', capital: True, elevation: 11}`. - -When querying nodes, multiple labels can be specified. Only nodes that hold all specified labels will be matched: - -```sh -$ redis-cli GRAPH.QUERY G "MATCH (n:Place:Continent) RETURN n" -``` - -## Relationships - -Relationships are persistent graph elements that connect one node to another. 
-
-They must have exactly one type that describes what they represent. For example, a `RESIDENT_OF` relationship may be used to connect a `Person` node to a `City` node.
-
-Relationships are always directed, connecting a source node to its destination.
-
-Like nodes, relationships have sets of properties to describe all of their salient characteristics.
-
-When querying relationships, multiple types can be specified, separated by pipe characters (`|`). Relationships that hold any of the specified types will be matched:
-
-```sh
-$ redis-cli GRAPH.QUERY G "MATCH (:Person)-[r:RESIDENT_OF|:VISITOR_TO]->(:Place {name: 'London'}) RETURN r"
-```
-
-## Paths
-
-Paths are alternating sequences of nodes and edges, starting and ending with a node.
-
-They are not structural elements in the graph, but can be created and returned by queries.
-
-For example, the following query returns all paths of any length connecting the node London to the node New York:
-
-```sh
-$ redis-cli GRAPH.QUERY G "MATCH p=(:City {name: 'London'})-[*]->(:City {name: 'New York'}) RETURN p"
-```
-
-## Scalar types
-
-All scalar types may be provided by queries or stored as property values on node and relationship objects.
-
-### Strings
-
-FalkorDB strings are Unicode character sequences. When using Redis with a TTY (such as invoking FalkorDB commands from the terminal via `redis-cli`), some code points may not be decoded, as in:
-
-```sh
-$ redis-cli GRAPH.QUERY G "RETURN '日本人' as stringval"
-1) 1) "stringval"
-2) 1) 1) "\xe6\x97\xa5\xe6\x9c\xac\xe4\xba\xba"
-```
-
-Output decoding can be forced using the `--raw` flag:
-
-```sh
-$ redis-cli --raw GRAPH.QUERY G "RETURN '日本人' as stringval"
-stringval
-日本人
-```
-
-### Booleans
-
-Boolean values are specified as `true` or `false`. Internally, they are stored as numerics, with 1 representing true and 0 representing false. As FalkorDB considers types in its comparisons, 1 is not considered equal to `true`:
-
-```sh
-$ redis-cli GRAPH.QUERY G "RETURN 1 = true"
-1) 1) "1 = true"
-2) 1) 1) "false"
-```
-
-### Integers
-
-All FalkorDB integers are treated as 64-bit signed integers.
-
-### Floating-point values
-
-All FalkorDB floating-point values are treated as 64-bit signed doubles.
-
-### Geospatial Points
-
-The Point data type is a pair of latitude/longitude coordinates, stored within FalkorDB as a pair of 32-bit floats. It is instantiated using the `point()` function (see [Cypher functions](/cypher/functions)).
-
-### Nulls
-
-In FalkorDB, `null` is used to stand in for an unknown or missing value.
-
-Since we cannot reason broadly about unknown values, `null` is an important part of FalkorDB's 3-valued truth table. For example, the comparison `null = null` will evaluate to `null`, as we lack adequate information about the compared values. Similarly, `null in [1,2,3]` evaluates to `null`, since the value we're looking up is unknown.
-
-Unlike all other scalars, `null` cannot be stored as a property value.
-
-## Temporal Types
-
-FalkorDB supports the following temporal types that allow modeling and querying time-related data:
-
-1. [Date](#date) - Calendar dates (YYYY-MM-DD)
-2. [Time](#time) - Time of day (HH:MM:SS)
-3. [DateTime](#datetime) - Combined date and time
-4. [Duration](#duration) - Time intervals
-
-These types follow the ISO 8601 standard and can be used in properties, parameters, and expressions.
-
-### Date
-
-Represents a calendar date in the format YYYY-MM-DD.
- -**Purpose:** -Use `Date` to store and compare dates without time information, such as birth dates, due dates, or deadlines. - -**Example:** - -```cypher -CREATE (:Event { name: "Conference", date: date("2025-09-15") }) -``` - -**Interactions:** -* Compare using operators (`=`, `<`, `>`, etc.) -* Extract components using functions: - -```cypher -RETURN date("2025-09-15").year // 2025 -RETURN date("2025-09-15").month // 9 -RETURN date("2025-09-15").day // 15 -``` - -### Time - -Represents a time of day in the format HH:MM:SS. - -**Purpose:** -Use `Time` to store specific times (e.g., store hours, alarm times) without date context. - -**Example:** - -```cypher -CREATE (:Reminder { msg: "Wake up!", at: localtime("07:00:00") }) -``` - -**Interactions:** - -* Compare time values: - -```cypher -RETURN localtime("07:00:00") < localtime("09:30:00") // true -``` - -* Extract parts: - -```cypher -RETURN localtime("15:45:20").hour // 15 -RETURN localtime("15:45:20").minute // 45 -RETURN localtime("15:45:20").second // 20 -``` - -### DateTime - -Represents a point in time, combining both date and time. Format: YYYY-MM-DDTHH:MM:SS. - -**Purpose:** -Use `DateTime` when both date and time are relevant, such as logging events, scheduling, or timestamps. - -**Example:** -```cypher -CREATE (:Log { message: "System rebooted", at: localdatetime("2025-06-29T13:45:00") }) -``` - -**Interactions:** - -* Compare with other `DateTime` values -* Extract parts: - -```cypher -RETURN localdatetime("2025-06-29T13:45:00").year // 2025 -RETURN localdatetime("2025-06-29T13:45:00").hour // 13 -``` - -* Use `localdatetime()` with no arguments to get the current system time: - -```cypher -RETURN localdatetime() -``` - -### Duration - -Represents a span of time in ISO 8601 Duration format: `P[n]Y[n]M[n]DT[n]H[n]M[n]S` - -**Purpose:** -Use `Duration` to represent time intervals, such as "3 days", "2 hours", or "1 year and 6 months". - -**Example:** -```cypher -CREATE (:Cooldown { period: duration("P3DT12H") }) -``` - -**Interactions:** - -* Add/subtract durations with dates or datetimes: - -```cypher -RETURN date("2025-01-01") + duration("P1M") // 2025-02-01 -RETURN datetime("2025-06-29T13:00:00") - duration("PT30M") // 2025-06-29T12:30:00 -``` - -* Add durations together: - -```cypher -RETURN duration("P1D") + duration("PT12H") // P1DT12H -``` - -* Extract fields: - -```cypher -RETURN duration("P1Y2M3DT4H5M6S").years // 1 -RETURN duration("P1Y2M3DT4H5M6S").hours // 4 -``` - -## Collection types - -### Arrays - -Arrays are ordered lists of elements. They can be provided as literals or generated by functions like `collect()`. Nested arrays are supported, as are many functions that operate on arrays such as [list comprehensions](/commands/graph.query#list-comprehensions). - -Arrays can be stored as property values provided that no array element is of an unserializable type, such as graph entities or `null` values. - -### Maps - -Maps are order-agnostic collections of key-value pairs. If a key is a string literal, the map can be accessed using dot notation. If it is instead an expression that evaluates to a string literal, bracket notation can be used: - -```sh -$ redis-cli GRAPH.QUERY G "WITH {key1: 'stringval', key2: 10} AS map RETURN map.key1, map['key' + 2]" -1) 1) "map.key1" -Β  Β 2) "map['key' + 2]" -2) 1) 1) "stringval" -Β  Β  Β  2) (integer) 10 -``` - -This aligns with the way that the properties of nodes and relationships can be accessed. - -Maps cannot be stored as property values. 
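-
-For example, the following sketch (illustrative only; it is not taken verbatim from the FalkorDB docs, and the exact error message may vary) shows that trying to persist a map as a property value is rejected, while storing its fields as individual properties works:
-
-```sh
-# Expected to be rejected: map values cannot be stored as properties
-$ redis-cli GRAPH.QUERY G "CREATE (:Person {profile: {name: 'Jeff', age: 32}})"
-
-# Works: store the map's fields as individual properties instead
-$ redis-cli GRAPH.QUERY G "CREATE (:Person {name: 'Jeff', age: 32})"
-```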
- -#### Map projections - -Maps can be constructed as projections using the syntax `alias {.key1 [, ...n]}`. This can provide a useful format for returning graph entities. For example, given a graph with the node `(name: 'Jeff', age: 32)`, we can build the projection: - -```sh -$ redis-cli GRAPH.QUERY G "MATCH (n) RETURN n {.name, .age} AS projection" -1) 1) "projection" -2) 1) 1) "{name: Jeff, age: 32}" -``` - -#### Map merging - -You can combine two maps, where values in the second map will override corresponding values in the first map. -For example: - -```sh -$ redis-cli GRAPH.QUERY g "RETURN {a: 1, b: 2} + {a: 2, c: 3}" -1) 1) "{a: 1, b: 2} + {a: 2, c: 3}" -2) 1) 1) "{b: 2, a: 2, c: 3}" -3) 1) "Cached execution: 0" - 2) "Query internal execution time: 0.467666 milliseconds" -``` - -#### Function calls in map values - -The values in maps and map projections are flexible, and can generally refer either to constants or computed values: - -```sh -$ redis-cli GRAPH.QUERY G "RETURN {key1: 'constant', key2: rand(), key3: toLower('GENERATED') + '_string'} AS map" -1) 1) "map" -2) 1) 1) "{key1: constant, key2: 0.889656, key3: generated_string}" -``` - -The exception to this is aggregation functions, which must be computed in a preceding `WITH` clause instead of being invoked within the map. This restriction is intentional, as it helps to clearly disambiguate the aggregate function calls and the key values they are grouped by: - -```sh -$ redis-cli GRAPH.QUERY G " -MATCH (follower:User)-[:FOLLOWS]->(u:User) -WITH u, COUNT(follower) AS count -RETURN u {.name, follower_count: count} AS user" -1) 1) "user" -2) 1) 1) "{name: Jeff, follower_count: 12}" - 2) 1) "{name: Roi, follower_count: 18}" -``` diff --git a/website/docs/design/bulk-spec.md b/website/docs/design/bulk-spec.md deleted file mode 100644 index 53fac4e..0000000 --- a/website/docs/design/bulk-spec.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -description: > ---- - - -# GRAPH.BULK endpoint specification - -The FalkorDB bulk loader uses the GRAPH.BULK endpoint to build a new graph from 1 or more Redis queries. -The bulk of these queries is binary data that is unpacked to create nodes, edges, and their properties. -This endpoint could be used to write bespoke import tools for other data formats using the implementation details provided here. - -## Caveats -The main complicating factor in writing bulk importers is that Redis has a maximum string length of 512 megabytes and a default maximum query size of 1 gigabyte. As such, large imports must be written incrementally. - -The FalkorDB team will do their best to ensure that future updates to this logic do not break current implementations, but cannot guarantee it. - -## Query Format - -``` -GRAPH.BULK [graph name] ["BEGIN"] [node count] [edge count] ([binary blob] * N) -``` - -### Arguments -#### graph name -The name of the graph to be inserted. - -#### BEGIN -The endpoint cannot be used to update existing graphs, only to create new ones. For this reason, the first query in a sequence of BULK commands should pass the string literal "BEGIN". - -#### node count -Number of nodes being inserted in this query. - -#### edge count -Number of edges being inserted in this query. - -#### binary blob -A binary string of up to 512 megabytes that partially or completely describes a single label or relationship type. - -Any number of these blobs may be provided in a query provided that Redis's 1-gigabyte query limit is not exceeded. 
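-
-To make the argument order concrete, here is a schematic sketch (illustrative only: the graph name `mygraph`, the counts, and the `<...>` placeholders are invented for this example; real blobs are binary strings assembled by the import tool and cannot be typed literally into a shell):
-
-```sh
-# First query in a sequence: declare a new graph holding 2 nodes and 1 edge,
-# described by one node blob and one edge blob.
-GRAPH.BULK mygraph BEGIN 2 1 "<node blob>" "<edge blob>"
-
-# Subsequent queries omit BEGIN and continue populating the same graph.
-GRAPH.BULK mygraph 0 3 "<edge blob>"
-```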
-
-### Module behavior
-The endpoint will parse binary blobs as nodes until the number of created nodes matches the node count, then will parse subsequent blobs as edges. The import tool is expected to correctly provide these counts.
-
-If the `BEGIN` token is found, the module will verify that the graph key is unused, and will emit an error if the key is already in use. If `BEGIN` is not passed, the partially-constructed graph will be retrieved in order to resume building.
-
-## Binary Blob format
-
-### Node format
-Nodes in node blobs do not need to specify their IDs. The ID of each node is an 8-byte unsigned integer corresponding to the node count at the time of its creation. (The first-created node has the ID of 0, the second has 1, and so forth.)
-
-The blob consists of:
-
-1. [header specification](#header-specification)
-
-2. 1 or more [property specifications](#property-specification)
-
-### Edge format
-The import tool is responsible for tracking the IDs of nodes used as edge endpoints.
-
-The blob consists of:
-
-1. [header specification](#header-specification)
-
-2. 1 or more:
-    1. 8-byte unsigned integer representing source node ID
-    2. 8-byte unsigned integer representing destination node ID
-    3. [property specification](#property-specification)
-
-
-#### Header specification
-1. `name` - A null-terminated string representing the name of the label or relationship type.
-
-2. `property count` - A 4-byte unsigned integer representing the number of properties each entry in this blob possesses.
-
-3. `property names` - An ordered sequence of `property count` null-terminated strings, each representing the name for the property at that position.
-
-#### Property specification
-1. `property type` - A 1-byte integer corresponding to the [TYPE enum](https://github.com/FalkorDB/FalkorDB/blob/master/src/bulk_insert/bulk_insert.c#L14-L23):
-```sh
-BI_NULL = 0,
-BI_BOOL = 1,
-BI_DOUBLE = 2,
-BI_STRING = 3,
-BI_LONG = 4,
-BI_ARRAY = 5,
-```
-
-2. `property`:
-    * 1-byte true/false if type is boolean
-    * 8-byte double if type is double
-    * 8-byte integer if type is integer
-    * Null-terminated C string if type is string
-    * 8-byte array length followed by N values of this same type-property pair if type is array
-
-
-## Redis Reply
-Redis will reply with a string of the format:
-```
-[N] nodes created, [M] edges created
-```
diff --git a/website/docs/design/client-spec.md b/website/docs/design/client-spec.md
deleted file mode 100644
index 3877164..0000000
--- a/website/docs/design/client-spec.md
+++ /dev/null
@@ -1,295 +0,0 @@
----
-title: Client Specification
-description: >
-sidebar_label: Client Specification
----
-
-
-
-# Client Specification
-By design, there is not a full standard for FalkorDB clients to adhere to. Areas such as pretty-print formatting, query validation, and transactional and multithreaded capabilities have no canonically correct behavior, and the implementer is free to choose the approach and complexity that suits them best.
-
-FalkorDB does, however, provide a compact result set format for clients that minimizes the amount of redundant data transmitted from the server. Implementers are encouraged to take advantage of this format, as it provides better performance and removes ambiguity from decoding certain data. This approach requires clients to be capable of issuing procedure calls to the server and performing a small amount of client-side caching.
- - -## Retrieving the compact result set - -Appending the flag `--compact` to any query issued to the GRAPH.QUERY endpoint will cause the server to issue results in the compact format. Because we don't store connection-specific configurations, all queries should be issued with this flag. - -```sh -GRAPH.QUERY demo "MATCH (a) RETURN a" --compact -``` - -## Formatting differences in the compact result set - -Certain values are emitted as integer IDs rather than strings: - -1. Node labels -2. Relationship types -3. Property keys - -Instructions on how to efficiently convert these IDs in the [Procedure Calls](#procedure-calls) section below. - -Additionally, two enums are exposed: - -[ColumnType](https://github.com/FalkorDB/FalkorDB/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L14-L19), which as of v2.1.0 will always be `COLUMN_SCALAR`. This enum is retained for backwards compatibility, and may be ignored by the client unless versions older than v2.1.0 must be supported. - -[ValueType](https://github.com/FalkorDB/FalkorDB/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L21-L28) indicates the data type (such as Node, integer, or string) of each returned value. Each value is emitted as a 2-array, with this enum in the first position and the actual value in the second. Each property on a graph entity also has a scalar as its value, so this construction is nested in each value of the properties array when a column contains a node or relationship. - -## Decoding the result set - -Given the graph created by the query: - -```sh -GRAPH.QUERY demo "CREATE (:plant {name: 'Tree'})-[:GROWS {season: 'Autumn'}]->(:fruit {name: 'Apple'})" -``` - -Let's formulate a query that returns 3 columns: nodes, relationships, and scalars, in that order. - -Verbose (default): - -```sh -127.0.0.1:6379> GRAPH.QUERY demo "MATCH (a)-[e]->(b) RETURN a, e, b.name" -1) 1) "a" - 2) "e" - 3) "b.name" -2) 1) 1) 1) 1) "id" - 2) (integer) 0 - 2) 1) "labels" - 2) 1) "plant" - 3) 1) "properties" - 2) 1) 1) "name" - 2) "Tree" - 2) 1) 1) "id" - 2) (integer) 0 - 2) 1) "type" - 2) "GROWS" - 3) 1) "src_node" - 2) (integer) 0 - 4) 1) "dest_node" - 2) (integer) 1 - 5) 1) "properties" - 2) 1) 1) "season" - 2) "Autumn" - 3) "Apple" -3) 1) "Query internal execution time: 1.326905 milliseconds" -``` - -Compact: - -```sh -127.0.0.1:6379> GRAPH.QUERY demo "MATCH (a)-[e]->(b) RETURN a, e, b.name" --compact -1) 1) 1) (integer) 1 - 2) "a" - 2) 1) (integer) 1 - 2) "e" - 3) 1) (integer) 1 - 2) "b.name" -2) 1) 1) 1) (integer) 8 - 2) 1) (integer) 0 - 2) 1) (integer) 0 - 3) 1) 1) (integer) 0 - 2) (integer) 2 - 3) "Tree" - 2) 1) (integer) 7 - 2) 1) (integer) 0 - 2) (integer) 0 - 3) (integer) 0 - 4) (integer) 1 - 5) 1) 1) (integer) 1 - 2) (integer) 2 - 3) "Autumn" - 3) 1) (integer) 2 - 2) "Apple" -3) 1) "Query internal execution time: 1.085412 milliseconds" -``` - -These results are being parsed by `redis-cli`, which adds such visual cues as array indexing and indentation, as well as type hints like `(integer)`. The actual data transmitted is formatted using the [RESP protocol](https://redis.io/topics/protocol). All of the current FalkorDB clients rely upon a stable Redis client in the same language (such as [redis-rb](https://github.com/redis/redis-rb) for Ruby) which handles RESP decoding. 
-
-### Top-level array results
-
-The result set above had 3 members in its top-level array:
-
-```sh
-1) Header row
-2) Result rows
-3) Query statistics
-```
-
-All queries that have a `RETURN` clause will have these 3 members. Queries that don't return results have only one member in the outermost array, the query statistics:
-
-```sh
-127.0.0.1:6379> GRAPH.QUERY demo "CREATE (:plant {name: 'Tree'})-[:GROWS {season: 'Autumn'}]->(:fruit {name: 'Apple'})" --compact
-1) 1) "Labels added: 2"
-   2) "Nodes created: 2"
-   3) "Properties set: 3"
-   4) "Relationships created: 1"
-   5) "Query internal execution time: 1.972868 milliseconds"
-```
-
-Rather than introspecting on the query being emitted, the client implementation can check whether this array contains 1 or 3 elements to choose how to format data.
-
-### Reading the header row
-
-Our sample query `MATCH (a)-[e]->(b) RETURN a, e, b.name` generated the header:
-
-```sh
-1) 1) (integer) 1
-   2) "a"
-2) 1) (integer) 1
-   2) "e"
-3) 1) (integer) 1
-   2) "b.name"
-```
-
-The 3 array members correspond, in order, to the 3 entities described in the RETURN clause.
-
-Each is emitted as a 2-array:
-
-```sh
-1) ColumnType (enum)
-2) column name (string)
-```
-
-The first element is the [ColumnType enum](https://github.com/FalkorDB/FalkorDB/blob/master/src/resultset/formatters/resultset_formatter.h#L14-L19), which as of RedisGraph v2.1.0 will always be `COLUMN_SCALAR`. This element is retained for backwards compatibility, and may be ignored by the client unless RedisGraph versions older than v2.1.0 must be supported.
-
-### Reading result rows
-
-The entity representations in this section will closely resemble those found in [Result Set Graph Entities](result-structure#graph-entities).
-
-Our query produced one row of results with 3 columns (as described by the header):
-
-```sh
-1) 1) 1) (integer) 8
-      2) 1) (integer) 0
-         2) 1) (integer) 0
-         3) 1) 1) (integer) 0
-               2) (integer) 2
-               3) "Tree"
-   2) 1) (integer) 7
-      2) 1) (integer) 0
-         2) (integer) 0
-         3) (integer) 0
-         4) (integer) 1
-         5) 1) 1) (integer) 1
-               2) (integer) 2
-               3) "Autumn"
-   3) 1) (integer) 2
-      2) "Apple"
-```
-
-Each element is emitted as a 2-array - [`ValueType`, value].
-
-It is the client's responsibility to store the [ValueType enum](https://github.com/FalkorDB/FalkorDB/blob/master/src/resultset/formatters/resultset_formatter.h#L21-L28). This enum may be extended in the future, but FalkorDB guarantees that the existing values will not be altered.
-
-The `ValueType` for the first entry is `VALUE_NODE`. The node representation contains 3 top-level elements:
-
-1. The node's internal ID.
-2. An array of all label IDs associated with the node (currently, each node can have either 0 or 1 labels, though this restriction may be lifted in the future).
-3. An array of all properties the node contains. Properties are represented as 3-arrays - [property key ID, `ValueType`, value].
-
-```sh
-[
-    Node ID (integer),
-    [label ID (integer) X label count]
-    [[property key ID (integer), ValueType (enum), value (scalar)] X property count]
-]
-```
-
-The `ValueType` for the second entry is `VALUE_EDGE`. The edge representation differs from the node representation in two respects:
-
-- Each relation has exactly one type, rather than the 0+ labels a node may have.
-- A relation is emitted with the IDs of its source and destination nodes.
-
-As such, the complete representation is as follows:
-
-1. The relation's internal ID.
-2. The relationship type ID.
-3. The source node's internal ID.
-4. 
The destination node's internal ID. -5. The key-value pairs of all properties the relation possesses. - -```sh -[ - Relation ID (integer), - type ID (integer), - source node ID (integer), - destination node ID (integer), - [[property key ID (integer), ValueType (enum), value (scalar)] X property count] -] -``` - -The `ValueType` for the third entry is `VALUE_STRING`, and the other element in the array is the actual value, "Apple". - -### Reading statistics - -The final top-level member of the GRAPH.QUERY reply is the execution statistics. This element is identical between the compact and standard response formats. - -The statistics always include query execution time, while any combination of the other elements may be included depending on how the graph was modified. - -1. "Labels added: (integer)" -2. "Labels removed: (integer)" (since RedisGraph 2.10) -3. "Nodes created: (integer)" -4. "Nodes deleted: (integer)" -5. "Properties set: (integer)" -6. "Properties removed: (integer)" (since RedisGraph 2.10) -7. "Relationships created: (integer)" -8. "Relationships deleted: (integer)" -9. "Indices created: (integer)" -10. "Indices deleted: (integer)" -11. "Query internal execution time: (float) milliseconds" - -## Procedure Calls - -Property keys, node labels, and relationship types are all returned as IDs rather than strings in the compact format. For each of these 3 string-ID mappings, IDs start at 0 and increase monotonically. - -As such, the client should store a string array for each of these 3 mappings, and print the appropriate string for the user by checking an array at position _ID_. If an ID greater than the array length is encountered, the local array should be updated with a procedure call. - -These calls are described generally in the [Procedures documentation](/cypher/procedures). - -To retrieve each full mapping, the appropriate calls are: - -`db.labels()` - -```sh -127.0.0.1:6379> GRAPH.QUERY demo "CALL db.labels()" -1) 1) "label" -2) 1) 1) "plant" - 2) 1) "fruit" -3) 1) "Query internal execution time: 0.321513 milliseconds" -``` - -`db.relationshipTypes()` - -```sh -127.0.0.1:6379> GRAPH.QUERY demo "CALL db.relationshipTypes()" -1) 1) "relationshipType" -2) 1) 1) "GROWS" -3) 1) "Query internal execution time: 0.429677 milliseconds" -``` - -`db.propertyKeys()` - -```sh -127.0.0.1:6379> GRAPH.QUERY demo "CALL db.propertyKeys()" -1) 1) "propertyKey" -2) 1) 1) "name" - 2) 1) "season" -3) 1) "Query internal execution time: 0.318940 milliseconds" -``` - -Because the cached values never become outdated, it is possible to just retrieve new values with slightly more complex constructions: - -```sh -CALL db.propertyKeys() YIELD propertyKey RETURN propertyKey SKIP [cached_array_length] -``` - -Though the property calls are quite efficient regardless of whether this optimization is used. - -As an example, the Python client checks its local array of labels to resolve every label ID [as seen here](https://github.com/RedisGraph/redisgraph-py/blob/d65ec325b1909489845427b7100dcba6c4050b66/redisgraph/graph.py#L20-L32). - -In the case of an IndexError, it issues a procedure call to fully refresh its label cache [as seen here](https://github.com/RedisGraph/redisgraph-py/blob/d65ec325b1909489845427b7100dcba6c4050b66/redisgraph/graph.py#L153-L154). - -## Reference clients - -All the logic described in this document has been implemented in most of the clients listed in [Client Libraries](/getting-started/clients). 
Among these, `node-redis`, `redis-py` and `jedis` are currently the most sophisticated.
diff --git a/website/docs/design/index.md b/website/docs/design/index.md
deleted file mode 100644
index 2583058..0000000
--- a/website/docs/design/index.md
+++ /dev/null
@@ -1,221 +0,0 @@
----
-title: The FalkorDB Design
-description: "FalkorDB: A High Performance In-Memory Graph Database"
-sidebar_position: 999
-sidebar_label: The FalkorDB Design
----
-
-# The FalkorDB Design
-
-## Abstract
-
-Graph-based data is everywhere nowadays. Facebook, Google, Twitter and Pinterest are just a few who've realized the power
-behind relationship data and are utilizing it to its fullest. As a direct result, we see a rise in both the interest in and
-the variety of graph data solutions.
-
-With the introduction of [Redis Modules](http://antirez.com/news/106) we've recognized the great potential of introducing a
-graph data structure to the Redis arsenal, and developed FalkorDB. Bringing new graph capabilities to Redis
-through a native C implementation with an emphasis on performance, [FalkorDB](https://github.com/FalkorDB/FalkorDB) is now
-available as an open source project.
-
-In this documentation, we'll discuss the internal design and features of FalkorDB and demonstrate its current capabilities.
-
-## FalkorDB At-a-Glance
-
-FalkorDB is a graph database developed from scratch on top of Redis, using the new Redis Modules API to extend Redis
-with new commands and capabilities. Its main features include:
-
-- Simple, fast indexing and querying
-- Data stored in RAM using memory-efficient custom data structures
-- On-disk persistence
-- Tabular result sets
-- Uses the popular graph query language [openCypher](https://opencypher.org/)
-
-## A Little Taste: FalkorDB in Action
-
-Let’s look at some of the key concepts of FalkorDB with an example, using the redis-cli tool:
-
-### Constructing a graph
-
-It is common to represent entities as nodes within a graph. In this example,
-we'll create a small graph with both actors and movies as its entities,
-and an "act" relation that will connect actors to the movies they acted in.
-We'll use the graph.QUERY command to issue a CREATE query,
-which will introduce new entities and relations to our graph.
-
-```sh
-graph.QUERY 'CREATE (: