mirror of https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-30 19:06:21 +02:00
parent a8e437fc7f
commit 6c7af8789d
216 changed files with 31360 additions and 1611 deletions
@@ -43,7 +43,12 @@ class Processor(DocumentEmbeddingsQueryService):
 
         for vec in msg.vectors:
 
-            resp = self.vecstore.search(vec, limit=msg.limit)
+            resp = self.vecstore.search(
+                vec,
+                msg.user,
+                msg.collection,
+                limit=msg.limit
+            )
 
         for r in resp:
             chunk = r["entity"]["doc"]
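The search call now threads msg.user and msg.collection through to the vector store, scoping results to a single tenant. The vecstore implementation itself is not part of this hunk; a minimal sketch of what such a scoped search could look like with pymilvus (the field names, metric, and filter scheme are assumptions for illustration, not the commit's actual code):

    from pymilvus import Collection

    def scoped_search(coll: Collection, vec, user, collection, limit=10):
        # Restrict the ANN search to one tenant's rows via a boolean filter
        # expression; "user" and "collection" are assumed scalar fields.
        expr = f'user == "{user}" and collection == "{collection}"'
        return coll.search(
            data=[vec],
            anns_field="embedding",            # assumed vector field name
            param={"metric_type": "COSINE"},
            limit=limit,
            expr=expr,
            output_fields=["doc"],             # the payload read back above
        )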
@@ -47,6 +47,39 @@ class Processor(DocumentEmbeddingsQueryService):
             }
         )
 
+        self.last_index_name = None
+
+    def ensure_index_exists(self, index_name, dim):
+        """Ensure index exists, create if it doesn't"""
+        if index_name != self.last_index_name:
+            if not self.pinecone.has_index(index_name):
+                try:
+                    self.pinecone.create_index(
+                        name=index_name,
+                        dimension=dim,
+                        metric="cosine",
+                        spec=ServerlessSpec(
+                            cloud="aws",
+                            region="us-east-1",
+                        )
+                    )
+                    logger.info(f"Created index: {index_name}")
+
+                    # Wait for index to be ready
+                    import time
+                    for i in range(0, 1000):
+                        if self.pinecone.describe_index(index_name).status["ready"]:
+                            break
+                        time.sleep(1)
+
+                    if not self.pinecone.describe_index(index_name).status["ready"]:
+                        raise RuntimeError("Gave up waiting for index creation")
+
+                except Exception as e:
+                    logger.error(f"Pinecone index creation failed: {e}")
+                    raise e
+            self.last_index_name = index_name
+
     async def query_document_embeddings(self, msg):
 
         try:
@@ -62,9 +95,11 @@ class Processor(DocumentEmbeddingsQueryService):
                 dim = len(vec)
 
                 index_name = (
-                    "d-" + msg.user + "-" + msg.collection + "-" + str(dim)
+                    "d-" + msg.user + "-" + msg.collection
                 )
 
+                self.ensure_index_exists(index_name, dim)
+
                 index = self.pinecone.Index(index_name)
 
                 results = index.query(
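Two things change here: the Pinecone index name no longer embeds the vector dimension, and ensure_index_exists() runs on the query path, so the index is created lazily with the dimension of whichever vector arrives first. The one-entry name cache avoids a has_index() round trip on every query. A stripped-down restatement of that pattern (illustrative only, not the commit's code):

    class LazyIndex:
        def __init__(self, client):
            self.client = client            # a Pinecone client, as in the diff
            self.last_index_name = None     # one-entry cache

        def ensure(self, name, dim):
            if name == self.last_index_name:
                return                      # hot path: no network calls at all
            if not self.client.has_index(name):
                self.client.create_index(name=name, dimension=dim, metric="cosine")
                # ...then poll describe_index(name).status["ready"], as above
            self.last_index_name = name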
@@ -38,6 +38,24 @@ class Processor(DocumentEmbeddingsQueryService):
         )
 
         self.qdrant = QdrantClient(url=store_uri, api_key=api_key)
+        self.last_collection = None
+
+    def ensure_collection_exists(self, collection, dim):
+        """Ensure collection exists, create if it doesn't"""
+        if collection != self.last_collection:
+            if not self.qdrant.collection_exists(collection):
+                try:
+                    self.qdrant.create_collection(
+                        collection_name=collection,
+                        vectors_config=VectorParams(
+                            size=dim, distance=Distance.COSINE
+                        ),
+                    )
+                    logger.info(f"Created collection: {collection}")
+                except Exception as e:
+                    logger.error(f"Qdrant collection creation failed: {e}")
+                    raise e
+            self.last_collection = collection
 
     async def query_document_embeddings(self, msg):
@@ -49,10 +67,11 @@ class Processor(DocumentEmbeddingsQueryService):
 
             dim = len(vec)
             collection = (
-                "d_" + msg.user + "_" + msg.collection + "_" +
-                str(dim)
+                "d_" + msg.user + "_" + msg.collection
             )
 
+            self.ensure_collection_exists(collection, dim)
+
            search_result = self.qdrant.query_points(
                collection_name=collection,
                query=vec,
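The Qdrant side follows the same shape: derive the per-tenant collection name, create it on first use with the query vector's own dimension, then query. A self-contained sketch of the flow using the public qdrant-client API (the URL, user, and vector are placeholders):

    from qdrant_client import QdrantClient
    from qdrant_client.models import Distance, VectorParams

    client = QdrantClient(url="http://localhost:6333")   # placeholder instance

    user, coll, vec = "alice", "default", [0.1] * 384    # placeholder data
    name = "d_" + user + "_" + coll

    # Lazy creation: the dimension comes from the query vector itself.
    if not client.collection_exists(name):
        client.create_collection(
            collection_name=name,
            vectors_config=VectorParams(size=len(vec), distance=Distance.COSINE),
        )

    hits = client.query_points(collection_name=name, query=vec, limit=10).points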
@@ -50,7 +50,12 @@ class Processor(GraphEmbeddingsQueryService):
 
         for vec in msg.vectors:
 
-            resp = self.vecstore.search(vec, limit=msg.limit * 2)
+            resp = self.vecstore.search(
+                vec,
+                msg.user,
+                msg.collection,
+                limit=msg.limit * 2
+            )
 
         for r in resp:
             ent = r["entity"]["entity"]
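Note the limit here is msg.limit * 2: as the comment in a later hunk puts it, over-fetching gives a better chance of ending up with (limit) distinct entities, since several hits can resolve to the same entity. The de-duplication itself is outside this hunk; the pattern is presumably something like:

    seen, ents = set(), []
    for r in resp:
        ent = r["entity"]["entity"]
        if ent not in seen:               # collapse duplicate entities
            seen.add(ent)
            ents.append(ent)
        if len(ents) >= msg.limit:        # stop once the requested count is met
            break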
@@ -49,6 +49,39 @@ class Processor(GraphEmbeddingsQueryService):
             }
         )
 
+        self.last_index_name = None
+
+    def ensure_index_exists(self, index_name, dim):
+        """Ensure index exists, create if it doesn't"""
+        if index_name != self.last_index_name:
+            if not self.pinecone.has_index(index_name):
+                try:
+                    self.pinecone.create_index(
+                        name=index_name,
+                        dimension=dim,
+                        metric="cosine",
+                        spec=ServerlessSpec(
+                            cloud="aws",
+                            region="us-east-1",
+                        )
+                    )
+                    logger.info(f"Created index: {index_name}")
+
+                    # Wait for index to be ready
+                    import time
+                    for i in range(0, 1000):
+                        if self.pinecone.describe_index(index_name).status["ready"]:
+                            break
+                        time.sleep(1)
+
+                    if not self.pinecone.describe_index(index_name).status["ready"]:
+                        raise RuntimeError("Gave up waiting for index creation")
+
+                except Exception as e:
+                    logger.error(f"Pinecone index creation failed: {e}")
+                    raise e
+            self.last_index_name = index_name
+
     def create_value(self, ent):
         if ent.startswith("http://") or ent.startswith("https://"):
             return Value(value=ent, is_uri=True)
@@ -71,9 +104,11 @@ class Processor(GraphEmbeddingsQueryService):
                 dim = len(vec)
 
                 index_name = (
-                    "t-" + msg.user + "-" + msg.collection + "-" + str(dim)
+                    "t-" + msg.user + "-" + msg.collection
                 )
 
+                self.ensure_index_exists(index_name, dim)
+
                 index = self.pinecone.Index(index_name)
 
                 # Heuristic hack, get (2*limit), so that we have more chance
@@ -38,6 +38,24 @@ class Processor(GraphEmbeddingsQueryService):
         )
 
         self.qdrant = QdrantClient(url=store_uri, api_key=api_key)
+        self.last_collection = None
+
+    def ensure_collection_exists(self, collection, dim):
+        """Ensure collection exists, create if it doesn't"""
+        if collection != self.last_collection:
+            if not self.qdrant.collection_exists(collection):
+                try:
+                    self.qdrant.create_collection(
+                        collection_name=collection,
+                        vectors_config=VectorParams(
+                            size=dim, distance=Distance.COSINE
+                        ),
+                    )
+                    logger.info(f"Created collection: {collection}")
+                except Exception as e:
+                    logger.error(f"Qdrant collection creation failed: {e}")
+                    raise e
+            self.last_collection = collection
 
     def create_value(self, ent):
         if ent.startswith("http://") or ent.startswith("https://"):
@@ -56,10 +74,11 @@ class Processor(GraphEmbeddingsQueryService):
 
             dim = len(vec)
             collection = (
-                "t_" + msg.user + "_" + msg.collection + "_" +
-                str(dim)
+                "t_" + msg.user + "_" + msg.collection
            )
 
+            self.ensure_collection_exists(collection, dim)
+
            # Heuristic hack, get (2*limit), so that we have more chance
            # of getting (limit) entities
            search_result = self.qdrant.query_points(
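Taken together, these hunks establish a naming convention: a "d"/"t" prefix apparently distinguishes document embeddings from triple (graph) embeddings, Pinecone index names use hyphens, Qdrant collection names use underscores, and the vector dimension is no longer part of either name; it is fixed when the index or collection is first created. Restated as helper functions (illustrative, not from the commit):

    def pinecone_index(kind: str, user: str, collection: str) -> str:
        # kind: "d" for document embeddings, "t" for triple/graph embeddings
        return f"{kind}-{user}-{collection}"

    def qdrant_collection(kind: str, user: str, collection: str) -> str:
        return f"{kind}_{user}_{collection}"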
trustgraph-flow/trustgraph/query/objects/__init__.py (new file)

@@ -0,0 +1,2 @@
+
+from . service import *
(another new file; its path is not captured in this view)

@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+from . service import run
+
+run()
+
trustgraph-flow/trustgraph/query/objects/cassandra/service.py (new file, 738 lines)

@@ -0,0 +1,738 @@
+"""
+Objects query service using GraphQL. Input is a GraphQL query with variables.
+Output is GraphQL response data with any errors.
+"""
+
+import json
+import logging
+import asyncio
+from typing import Dict, Any, Optional, List, Set
+from enum import Enum
+from dataclasses import dataclass, field
+from cassandra.cluster import Cluster
+from cassandra.auth import PlainTextAuthProvider
+
+import strawberry
+from strawberry import Schema
+from strawberry.types import Info
+from strawberry.scalars import JSON
+from strawberry.tools import create_type
+
+from .... schema import ObjectsQueryRequest, ObjectsQueryResponse, GraphQLError
+from .... schema import Error, RowSchema, Field as SchemaField
+from .... base import FlowProcessor, ConsumerSpec, ProducerSpec
+from .... base.cassandra_config import add_cassandra_args, resolve_cassandra_config
+
+# Module logger
+logger = logging.getLogger(__name__)
+
+default_ident = "objects-query"
+
+# GraphQL filter input types
+@strawberry.input
+class IntFilter:
+    eq: Optional[int] = None
+    gt: Optional[int] = None
+    gte: Optional[int] = None
+    lt: Optional[int] = None
+    lte: Optional[int] = None
+    in_: Optional[List[int]] = strawberry.field(name="in", default=None)
+    not_: Optional[int] = strawberry.field(name="not", default=None)
+    not_in: Optional[List[int]] = None
+
+@strawberry.input
+class StringFilter:
+    eq: Optional[str] = None
+    contains: Optional[str] = None
+    startsWith: Optional[str] = None
+    endsWith: Optional[str] = None
+    in_: Optional[List[str]] = strawberry.field(name="in", default=None)
+    not_: Optional[str] = strawberry.field(name="not", default=None)
+    not_in: Optional[List[str]] = None
+
+@strawberry.input
+class FloatFilter:
+    eq: Optional[float] = None
+    gt: Optional[float] = None
+    gte: Optional[float] = None
+    lt: Optional[float] = None
+    lte: Optional[float] = None
+    in_: Optional[List[float]] = strawberry.field(name="in", default=None)
+    not_: Optional[float] = strawberry.field(name="not", default=None)
+    not_in: Optional[List[float]] = None
+
+
+class Processor(FlowProcessor):
+
+    def __init__(self, **params):
+
+        id = params.get("id", default_ident)
+
+        # Get Cassandra parameters
+        cassandra_host = params.get("cassandra_host")
+        cassandra_username = params.get("cassandra_username")
+        cassandra_password = params.get("cassandra_password")
+
+        # Resolve configuration with environment variable fallback
+        hosts, username, password = resolve_cassandra_config(
+            host=cassandra_host,
+            username=cassandra_username,
+            password=cassandra_password
+        )
+
+        # Store resolved configuration with proper names
+        self.cassandra_host = hosts  # Store as list
+        self.cassandra_username = username
+        self.cassandra_password = password
+
+        # Config key for schemas
+        self.config_key = params.get("config_type", "schema")
+
+        super(Processor, self).__init__(
+            **params | {
+                "id": id,
+                "config_type": self.config_key,
+            }
+        )
+
+        self.register_specification(
+            ConsumerSpec(
+                name = "request",
+                schema = ObjectsQueryRequest,
+                handler = self.on_message
+            )
+        )
+
+        self.register_specification(
+            ProducerSpec(
+                name = "response",
+                schema = ObjectsQueryResponse,
+            )
+        )
+
+        # Register config handler for schema updates
+        self.register_config_handler(self.on_schema_config)
+
+        # Schema storage: name -> RowSchema
+        self.schemas: Dict[str, RowSchema] = {}
+
+        # GraphQL schema
+        self.graphql_schema: Optional[Schema] = None
+
+        # GraphQL types cache
+        self.graphql_types: Dict[str, type] = {}
+
+        # Cassandra session
+        self.cluster = None
+        self.session = None
+
+        # Known keyspaces and tables
+        self.known_keyspaces: Set[str] = set()
+        self.known_tables: Dict[str, Set[str]] = {}
+
+    def connect_cassandra(self):
+        """Connect to Cassandra cluster"""
+        if self.session:
+            return
+
+        try:
+            if self.cassandra_username and self.cassandra_password:
+                auth_provider = PlainTextAuthProvider(
+                    username=self.cassandra_username,
+                    password=self.cassandra_password
+                )
+                self.cluster = Cluster(
+                    contact_points=self.cassandra_host,
+                    auth_provider=auth_provider
+                )
+            else:
+                self.cluster = Cluster(contact_points=self.cassandra_host)
+
+            self.session = self.cluster.connect()
+            logger.info(f"Connected to Cassandra cluster at {self.cassandra_host}")
+
+        except Exception as e:
+            logger.error(f"Failed to connect to Cassandra: {e}", exc_info=True)
+            raise
+
+    def sanitize_name(self, name: str) -> str:
+        """Sanitize names for Cassandra compatibility"""
+        import re
+        safe_name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
+        if safe_name and not safe_name[0].isalpha():
+            safe_name = 'o_' + safe_name
+        return safe_name.lower()
+
+    def sanitize_table(self, name: str) -> str:
+        """Sanitize table names for Cassandra compatibility"""
+        import re
+        safe_name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
+        safe_name = 'o_' + safe_name
+        return safe_name.lower()
+
+    def parse_filter_key(self, filter_key: str) -> tuple[str, str]:
+        """Parse GraphQL filter key into field name and operator"""
+        if not filter_key:
+            return ("", "eq")
+
+        # Support common GraphQL filter patterns:
+        # field_name     -> (field_name, "eq")
+        # field_name_gt  -> (field_name, "gt")
+        # field_name_gte -> (field_name, "gte")
+        # field_name_lt  -> (field_name, "lt")
+        # field_name_lte -> (field_name, "lte")
+        # field_name_in  -> (field_name, "in")
+
+        operators = ["_gte", "_lte", "_gt", "_lt", "_in", "_eq"]
+
+        for op_suffix in operators:
+            if filter_key.endswith(op_suffix):
+                field_name = filter_key[:-len(op_suffix)]
+                operator = op_suffix[1:]  # Remove the leading underscore
+                return (field_name, operator)
+
+        # Default to equality if no operator suffix found
+        return (filter_key, "eq")
+
+    async def on_schema_config(self, config, version):
+        """Handle schema configuration updates"""
+        logger.info(f"Loading schema configuration version {version}")
+
+        # Clear existing schemas
+        self.schemas = {}
+        self.graphql_types = {}
+
+        # Check if our config type exists
+        if self.config_key not in config:
+            logger.warning(f"No '{self.config_key}' type in configuration")
+            return
+
+        # Get the schemas dictionary for our type
+        schemas_config = config[self.config_key]
+
+        # Process each schema in the schemas config
+        for schema_name, schema_json in schemas_config.items():
+            try:
+                # Parse the JSON schema definition
+                schema_def = json.loads(schema_json)
+
+                # Create Field objects
+                fields = []
+                for field_def in schema_def.get("fields", []):
+                    field = SchemaField(
+                        name=field_def["name"],
+                        type=field_def["type"],
+                        size=field_def.get("size", 0),
+                        primary=field_def.get("primary_key", False),
+                        description=field_def.get("description", ""),
+                        required=field_def.get("required", False),
+                        enum_values=field_def.get("enum", []),
+                        indexed=field_def.get("indexed", False)
+                    )
+                    fields.append(field)
+
+                # Create RowSchema
+                row_schema = RowSchema(
+                    name=schema_def.get("name", schema_name),
+                    description=schema_def.get("description", ""),
+                    fields=fields
+                )
+
+                self.schemas[schema_name] = row_schema
+                logger.info(f"Loaded schema: {schema_name} with {len(fields)} fields")
+
+            except Exception as e:
+                logger.error(f"Failed to parse schema {schema_name}: {e}", exc_info=True)
+
+        logger.info(f"Schema configuration loaded: {len(self.schemas)} schemas")
+
+        # Regenerate GraphQL schema
+        self.generate_graphql_schema()
+
+    def get_python_type(self, field_type: str):
+        """Convert schema field type to Python type for GraphQL"""
+        type_mapping = {
+            "string": str,
+            "integer": int,
+            "float": float,
+            "boolean": bool,
+            "timestamp": str,  # Use string for timestamps in GraphQL
+            "date": str,
+            "time": str,
+            "uuid": str
+        }
+        return type_mapping.get(field_type, str)
+
+    def create_graphql_type(self, schema_name: str, row_schema: RowSchema) -> type:
+        """Create a GraphQL type from a RowSchema"""
+
+        # Create annotations for the GraphQL type
+        annotations = {}
+        defaults = {}
+
+        for field in row_schema.fields:
+            python_type = self.get_python_type(field.type)
+
+            # Make field optional if not required
+            if not field.required and not field.primary:
+                annotations[field.name] = Optional[python_type]
+                defaults[field.name] = None
+            else:
+                annotations[field.name] = python_type
+
+        # Create the class dynamically
+        type_name = f"{schema_name.capitalize()}Type"
+        graphql_class = type(
+            type_name,
+            (),
+            {
+                "__annotations__": annotations,
+                **defaults
+            }
+        )
+
+        # Apply strawberry decorator
+        return strawberry.type(graphql_class)
+
+    def create_filter_type_for_schema(self, schema_name: str, row_schema: RowSchema):
+        """Create a dynamic filter input type for a schema"""
+        # Create the filter type dynamically
+        filter_type_name = f"{schema_name.capitalize()}Filter"
+
+        # Add __annotations__ and defaults for the fields
+        annotations = {}
+        defaults = {}
+
+        logger.info(f"Creating filter type {filter_type_name} for schema {schema_name}")
+
+        for field in row_schema.fields:
+            logger.info(f"Field {field.name}: type={field.type}, indexed={field.indexed}, primary={field.primary}")
+
+            # Allow filtering on any field for now, not just indexed/primary
+            # if field.indexed or field.primary:
+            if field.type == "integer":
+                annotations[field.name] = Optional[IntFilter]
+                defaults[field.name] = None
+                logger.info(f"Added IntFilter for {field.name}")
+            elif field.type == "float":
+                annotations[field.name] = Optional[FloatFilter]
+                defaults[field.name] = None
+                logger.info(f"Added FloatFilter for {field.name}")
+            elif field.type == "string":
+                annotations[field.name] = Optional[StringFilter]
+                defaults[field.name] = None
+                logger.info(f"Added StringFilter for {field.name}")
+
+        logger.info(f"Filter type {filter_type_name} will have fields: {list(annotations.keys())}")
+
+        # Create the class dynamically
+        FilterType = type(
+            filter_type_name,
+            (),
+            {
+                "__annotations__": annotations,
+                **defaults
+            }
+        )
+
+        # Apply strawberry input decorator
+        FilterType = strawberry.input(FilterType)
+
+        return FilterType
+
+    def create_sort_direction_enum(self):
+        """Create sort direction enum"""
+        @strawberry.enum
+        class SortDirection(Enum):
+            ASC = "asc"
+            DESC = "desc"
+
+        return SortDirection
+
+    def parse_idiomatic_where_clause(self, where_obj) -> Dict[str, Any]:
+        """Parse the idiomatic nested filter structure"""
+        if not where_obj:
+            return {}
+
+        conditions = {}
+
+        logger.info(f"Parsing where clause: {where_obj}")
+
+        for field_name, filter_obj in where_obj.__dict__.items():
+            if filter_obj is None:
+                continue
+
+            logger.info(f"Processing field {field_name} with filter_obj: {filter_obj}")
+
+            if hasattr(filter_obj, '__dict__'):
+                # This is a filter object (StringFilter, IntFilter, etc.)
+                for operator, value in filter_obj.__dict__.items():
+                    if value is not None:
+                        logger.info(f"Found operator {operator} with value {value}")
+                        # Map GraphQL operators to our internal format
+                        if operator == "eq":
+                            conditions[field_name] = value
+                        elif operator in ["gt", "gte", "lt", "lte"]:
+                            conditions[f"{field_name}_{operator}"] = value
+                        elif operator == "in_":
+                            conditions[f"{field_name}_in"] = value
+                        elif operator == "contains":
+                            conditions[f"{field_name}_contains"] = value
+
+        logger.info(f"Final parsed conditions: {conditions}")
+        return conditions
+
+    def generate_graphql_schema(self):
+        """Generate GraphQL schema from loaded schemas using dynamic filter types"""
+        if not self.schemas:
+            logger.warning("No schemas loaded, cannot generate GraphQL schema")
+            self.graphql_schema = None
+            return
+
+        # Create GraphQL types and filter types for each schema
+        filter_types = {}
+        sort_direction_enum = self.create_sort_direction_enum()
+
+        for schema_name, row_schema in self.schemas.items():
+            graphql_type = self.create_graphql_type(schema_name, row_schema)
+            filter_type = self.create_filter_type_for_schema(schema_name, row_schema)
+
+            self.graphql_types[schema_name] = graphql_type
+            filter_types[schema_name] = filter_type
+
+        # Create the Query class with resolvers
+        query_dict = {'__annotations__': {}}
+
+        for schema_name, row_schema in self.schemas.items():
+            graphql_type = self.graphql_types[schema_name]
+            filter_type = filter_types[schema_name]
+
+            # Create resolver function for this schema
+            def make_resolver(s_name, r_schema, g_type, f_type, sort_enum):
+                async def resolver(
+                    info: Info,
+                    where: Optional[f_type] = None,
+                    order_by: Optional[str] = None,
+                    direction: Optional[sort_enum] = None,
+                    limit: Optional[int] = 100
+                ) -> List[g_type]:
+                    # Get the processor instance from context
+                    processor = info.context["processor"]
+                    user = info.context["user"]
+                    collection = info.context["collection"]
+
+                    # Parse the idiomatic where clause
+                    filters = processor.parse_idiomatic_where_clause(where)
+
+                    # Query Cassandra
+                    results = await processor.query_cassandra(
+                        user, collection, s_name, r_schema,
+                        filters, limit, order_by, direction
+                    )
+
+                    # Convert to GraphQL types
+                    graphql_results = []
+                    for row in results:
+                        graphql_obj = g_type(**row)
+                        graphql_results.append(graphql_obj)
+
+                    return graphql_results
+
+                return resolver
+
+            # Add resolver to query
+            resolver_name = schema_name
+            resolver_func = make_resolver(schema_name, row_schema, graphql_type, filter_type, sort_direction_enum)
+
+            # Add field to query dictionary
+            query_dict[resolver_name] = strawberry.field(resolver=resolver_func)
+            query_dict['__annotations__'][resolver_name] = List[graphql_type]
+
+        # Create the Query class
+        Query = type('Query', (), query_dict)
+        Query = strawberry.type(Query)
+
+        # Create the schema with auto_camel_case disabled to keep snake_case field names
+        self.graphql_schema = strawberry.Schema(
+            query=Query,
+            config=strawberry.schema.config.StrawberryConfig(auto_camel_case=False)
+        )
+        logger.info(f"Generated GraphQL schema with {len(self.schemas)} types")
+
+    async def query_cassandra(
+        self,
+        user: str,
+        collection: str,
+        schema_name: str,
+        row_schema: RowSchema,
+        filters: Dict[str, Any],
+        limit: int,
+        order_by: Optional[str] = None,
+        direction: Optional[Any] = None
+    ) -> List[Dict[str, Any]]:
+        """Execute a query against Cassandra"""
+
+        # Connect if needed
+        self.connect_cassandra()
+
+        # Build the query
+        keyspace = self.sanitize_name(user)
+        table = self.sanitize_table(schema_name)
+
+        # Start with basic SELECT
+        query = f"SELECT * FROM {keyspace}.{table}"
+
+        # Add WHERE clauses
+        where_clauses = [f"collection = %s"]
+        params = [collection]
+
+        # Add filters for indexed or primary key fields
+        for filter_key, value in filters.items():
+            if value is not None:
+                # Parse field name and operator from filter key
+                logger.debug(f"Parsing filter key: '{filter_key}' (type: {type(filter_key)})")
+                result = self.parse_filter_key(filter_key)
+                logger.debug(f"parse_filter_key returned: {result} (type: {type(result)}, len: {len(result) if hasattr(result, '__len__') else 'N/A'})")
+
+                if not result or len(result) != 2:
+                    logger.error(f"parse_filter_key returned invalid result: {result}")
+                    continue  # Skip this filter
+
+                field_name, operator = result
+
+                # Find the field in schema
+                schema_field = None
+                for f in row_schema.fields:
+                    if f.name == field_name:
+                        schema_field = f
+                        break
+
+                if schema_field:
+                    safe_field = self.sanitize_name(field_name)
+
+                    # Build WHERE clause based on operator
+                    if operator == "eq":
+                        where_clauses.append(f"{safe_field} = %s")
+                        params.append(value)
+                    elif operator == "gt":
+                        where_clauses.append(f"{safe_field} > %s")
+                        params.append(value)
+                    elif operator == "gte":
+                        where_clauses.append(f"{safe_field} >= %s")
+                        params.append(value)
+                    elif operator == "lt":
+                        where_clauses.append(f"{safe_field} < %s")
+                        params.append(value)
+                    elif operator == "lte":
+                        where_clauses.append(f"{safe_field} <= %s")
+                        params.append(value)
+                    elif operator == "in":
+                        if isinstance(value, list):
+                            placeholders = ",".join(["%s"] * len(value))
+                            where_clauses.append(f"{safe_field} IN ({placeholders})")
+                            params.extend(value)
+                    else:
+                        # Default to equality for unknown operators
+                        where_clauses.append(f"{safe_field} = %s")
+                        params.append(value)
+
+        if where_clauses:
+            query += " WHERE " + " AND ".join(where_clauses)
+
+        # Add ORDER BY if requested (will try Cassandra first, then fall back to post-query sort)
+        cassandra_order_by_added = False
+        if order_by and direction:
+            # Validate that order_by field exists in schema
+            order_field_exists = any(f.name == order_by for f in row_schema.fields)
+            if order_field_exists:
+                safe_order_field = self.sanitize_name(order_by)
+                direction_str = "ASC" if direction.value == "asc" else "DESC"
+                # Add ORDER BY - if Cassandra rejects it, we'll catch the error during execution
+                query += f" ORDER BY {safe_order_field} {direction_str}"
+
+        # Add limit first (must come before ALLOW FILTERING)
+        if limit:
+            query += f" LIMIT {limit}"
+
+        # Add ALLOW FILTERING for now (should optimize with proper indexes later)
+        query += " ALLOW FILTERING"
+
+        # Execute query
+        try:
+            result = self.session.execute(query, params)
+            cassandra_order_by_added = True  # If we get here, Cassandra handled ORDER BY
+        except Exception as e:
+            # If ORDER BY fails, try without it
+            if order_by and direction and "ORDER BY" in query:
+                logger.info(f"Cassandra rejected ORDER BY, falling back to post-query sorting: {e}")
+                # Remove ORDER BY clause and retry
+                query_parts = query.split(" ORDER BY ")
+                if len(query_parts) == 2:
+                    query_without_order = (
+                        query_parts[0]
+                        + (" LIMIT " + str(limit) if limit else "")
+                        + " ALLOW FILTERING"
+                    )
+                    result = self.session.execute(query_without_order, params)
+                    cassandra_order_by_added = False
+                else:
+                    raise
+            else:
+                raise
+
+        # Convert rows to dicts
+        results = []
+        for row in result:
+            row_dict = {}
+            for field in row_schema.fields:
+                safe_field = self.sanitize_name(field.name)
+                if hasattr(row, safe_field):
+                    value = getattr(row, safe_field)
+                    # Use original field name in result
+                    row_dict[field.name] = value
+            results.append(row_dict)
+
+        # Post-query sorting if Cassandra didn't handle ORDER BY
+        if order_by and direction and not cassandra_order_by_added:
+            reverse_order = (direction.value == "desc")
+            try:
+                results.sort(key=lambda x: x.get(order_by, 0), reverse=reverse_order)
+            except Exception as e:
+                logger.warning(f"Failed to sort results by {order_by}: {e}")
+
+        return results
+
+    async def execute_graphql_query(
+        self,
+        query: str,
+        variables: Dict[str, Any],
+        operation_name: Optional[str],
+        user: str,
+        collection: str
+    ) -> Dict[str, Any]:
+        """Execute a GraphQL query"""
+
+        if not self.graphql_schema:
+            raise RuntimeError("No GraphQL schema available - no schemas loaded")
+
+        # Create context for the query
+        context = {
+            "processor": self,
+            "user": user,
+            "collection": collection
+        }
+
+        # Execute the query
+        result = await self.graphql_schema.execute(
+            query,
+            variable_values=variables,
+            operation_name=operation_name,
+            context_value=context
+        )
+
+        # Build response
+        response = {}
+
+        if result.data:
+            response["data"] = result.data
+        else:
+            response["data"] = None
+
+        if result.errors:
+            response["errors"] = [
+                {
+                    "message": str(error),
+                    "path": getattr(error, "path", []),
+                    "extensions": getattr(error, "extensions", {})
+                }
+                for error in result.errors
+            ]
+        else:
+            response["errors"] = []
+
+        # Add extensions if any
+        if hasattr(result, "extensions") and result.extensions:
+            response["extensions"] = result.extensions
+
+        return response
+
+    async def on_message(self, msg, consumer, flow):
+        """Handle incoming query request"""
+
+        try:
+            request = msg.value()
+
+            # Sender-produced ID
+            id = msg.properties()["id"]
+
+            logger.debug(f"Handling objects query request {id}...")
+
+            # Execute GraphQL query
+            result = await self.execute_graphql_query(
+                query=request.query,
+                variables=dict(request.variables) if request.variables else {},
+                operation_name=request.operation_name,
+                user=request.user,
+                collection=request.collection
+            )
+
+            # Create response
+            graphql_errors = []
+            if "errors" in result and result["errors"]:
+                for err in result["errors"]:
+                    graphql_error = GraphQLError(
+                        message=err.get("message", ""),
+                        path=err.get("path", []),
+                        extensions=err.get("extensions", {})
+                    )
+                    graphql_errors.append(graphql_error)
+
+            response = ObjectsQueryResponse(
+                error=None,
+                data=json.dumps(result.get("data")) if result.get("data") else "null",
+                errors=graphql_errors,
+                extensions=result.get("extensions", {})
+            )
+
+            logger.debug("Sending objects query response...")
+            await flow("response").send(response, properties={"id": id})
+
+            logger.debug("Objects query request completed")
+
+        except Exception as e:
+
+            logger.error(f"Exception in objects query service: {e}", exc_info=True)
+
+            logger.info("Sending error response...")
+
+            response = ObjectsQueryResponse(
+                error = Error(
+                    type = "objects-query-error",
+                    message = str(e),
+                ),
+                data = None,
+                errors = [],
+                extensions = {}
+            )
+
+            await flow("response").send(response, properties={"id": id})
+
+    def close(self):
+        """Clean up Cassandra connections"""
+        if self.cluster:
+            self.cluster.shutdown()
+            logger.info("Closed Cassandra connection")
+
+    @staticmethod
+    def add_args(parser):
+        """Add command-line arguments"""
+
+        FlowProcessor.add_args(parser)
+        add_cassandra_args(parser)
+
+        parser.add_argument(
+            '--config-type',
+            default='schema',
+            help='Configuration type prefix for schemas (default: schema)'
+        )
+
+def run():
+    """Entry point for objects-query-graphql-cassandra command"""
+    Processor.launch(default_ident, __doc__)
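The new service generates one GraphQL object type and one filter input type per configured schema, with snake_case names preserved (auto_camel_case is disabled). A request against a hypothetical "cities" schema, with a string field name and an integer field population, might look like the following; the filter shape (eq/gt/contains), order_by, direction, and limit mirror the generated resolver signature:

    query = """
    query {
      cities(
        where: { population: { gt: 1000000 }, name: { contains: "San" } }
        order_by: "population"
        direction: DESC
        limit: 10
      ) {
        name
        population
      }
    }
    """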
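parse_filter_key() turns the flattened keys produced by parse_idiomatic_where_clause() back into (field, operator) pairs. A standalone restatement with the same suffix logic, plus the cases it handles:

    def parse_filter_key(filter_key: str) -> tuple[str, str]:
        # Identical suffix logic to the method above; "_gte"/"_lte" are
        # tried before "_gt"/"_lt" so the longer suffixes win.
        operators = ["_gte", "_lte", "_gt", "_lt", "_in", "_eq"]
        for op_suffix in operators:
            if filter_key.endswith(op_suffix):
                return (filter_key[:-len(op_suffix)], op_suffix[1:])
        return (filter_key, "eq")

    assert parse_filter_key("population_gte") == ("population", "gte")
    assert parse_filter_key("population_gt") == ("population", "gt")
    assert parse_filter_key("name_in") == ("name", "in")
    assert parse_filter_key("name") == ("name", "eq")   # no suffix -> equality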
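For the request sketched above, query_cassandra() would assemble CQL along these lines (keyspace and table are derived via sanitize_name()/sanitize_table(), so a hypothetical user "alice" and schema "cities" map to alice.o_cities; LIMIT deliberately precedes ALLOW FILTERING):

    cql = (
        "SELECT * FROM alice.o_cities"
        " WHERE collection = %s AND population > %s"
        " LIMIT 10 ALLOW FILTERING"
    )
    params = ["default", 1000000]   # collection first, then filter values in order

Note that "contains" has no entry in parse_filter_key's suffix list, so the name_contains key from the sketch above parses as an unknown field name, fails the schema lookup, and is silently dropped from the CQL as the code stands.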
@@ -6,36 +6,44 @@ null. Output is a list of triples.
 
 import logging
 
-from .... direct.cassandra import TrustGraph
+from .... direct.cassandra_kg import KnowledgeGraph
 from .... schema import TriplesQueryRequest, TriplesQueryResponse, Error
 from .... schema import Value, Triple
 from .... base import TriplesQueryService
+from .... base.cassandra_config import add_cassandra_args, resolve_cassandra_config
 
 # Module logger
 logger = logging.getLogger(__name__)
 
 default_ident = "triples-query"
 
-default_graph_host='localhost'
 
 class Processor(TriplesQueryService):
 
     def __init__(self, **params):
 
-        graph_host = params.get("graph_host", default_graph_host)
-        graph_username = params.get("graph_username", None)
-        graph_password = params.get("graph_password", None)
+        # Get Cassandra parameters
+        cassandra_host = params.get("cassandra_host")
+        cassandra_username = params.get("cassandra_username")
+        cassandra_password = params.get("cassandra_password")
+
+        # Resolve configuration with environment variable fallback
+        hosts, username, password = resolve_cassandra_config(
+            host=cassandra_host,
+            username=cassandra_username,
+            password=cassandra_password
+        )
 
         super(Processor, self).__init__(
             **params | {
-                "graph_host": graph_host,
-                "graph_username": graph_username,
+                "cassandra_host": ','.join(hosts),
+                "cassandra_username": username,
             }
         )
 
-        self.graph_host = [graph_host]
-        self.username = graph_username
-        self.password = graph_password
+        self.cassandra_host = hosts
+        self.cassandra_username = username
+        self.cassandra_password = password
         self.table = None
 
     def create_value(self, ent):

@@ -48,21 +56,21 @@ class Processor(TriplesQueryService):
 
         try:
 
-            table = (query.user, query.collection)
+            user = query.user
 
-            if table != self.table:
-                if self.username and self.password:
-                    self.tg = TrustGraph(
-                        hosts=self.graph_host,
-                        keyspace=query.user, table=query.collection,
-                        username=self.username, password=self.password
+            if user != self.table:
+                if self.cassandra_username and self.cassandra_password:
+                    self.tg = KnowledgeGraph(
+                        hosts=self.cassandra_host,
+                        keyspace=query.user,
+                        username=self.cassandra_username, password=self.cassandra_password
                     )
                 else:
-                    self.tg = TrustGraph(
-                        hosts=self.graph_host,
-                        keyspace=query.user, table=query.collection,
+                    self.tg = KnowledgeGraph(
+                        hosts=self.cassandra_host,
+                        keyspace=query.user,
                     )
-                self.table = table
+                self.table = user
 
             triples = []

@@ -70,13 +78,13 @@ class Processor(TriplesQueryService):
             if query.p is not None:
                 if query.o is not None:
                     resp = self.tg.get_spo(
-                        query.s.value, query.p.value, query.o.value,
+                        query.collection, query.s.value, query.p.value, query.o.value,
                         limit=query.limit
                     )
                     triples.append((query.s.value, query.p.value, query.o.value))
                 else:
                     resp = self.tg.get_sp(
-                        query.s.value, query.p.value,
+                        query.collection, query.s.value, query.p.value,
                         limit=query.limit
                     )
                     for t in resp:

@@ -84,14 +92,14 @@ class Processor(TriplesQueryService):
             else:
                 if query.o is not None:
                     resp = self.tg.get_os(
-                        query.o.value, query.s.value,
+                        query.collection, query.o.value, query.s.value,
                         limit=query.limit
                     )
                     for t in resp:
                         triples.append((query.s.value, t.p, query.o.value))
                 else:
                     resp = self.tg.get_s(
-                        query.s.value,
+                        query.collection, query.s.value,
                         limit=query.limit
                     )
                     for t in resp:

@@ -100,14 +108,14 @@ class Processor(TriplesQueryService):
             if query.p is not None:
                 if query.o is not None:
                     resp = self.tg.get_po(
-                        query.p.value, query.o.value,
+                        query.collection, query.p.value, query.o.value,
                         limit=query.limit
                    )
                    for t in resp:
                        triples.append((t.s, query.p.value, query.o.value))
                else:
                    resp = self.tg.get_p(
-                        query.p.value,
+                        query.collection, query.p.value,
                        limit=query.limit
                    )
                    for t in resp:

@@ -115,13 +123,14 @@ class Processor(TriplesQueryService):
             else:
                 if query.o is not None:
                     resp = self.tg.get_o(
-                        query.o.value,
+                        query.collection, query.o.value,
                         limit=query.limit
                     )
                     for t in resp:
                         triples.append((t.s, t.p, query.o.value))
                 else:
                     resp = self.tg.get_all(
+                        query.collection,
                         limit=query.limit
                     )
                     for t in resp:

@@ -147,24 +156,7 @@ class Processor(TriplesQueryService):
     def add_args(parser):
 
         TriplesQueryService.add_args(parser)
 
-        parser.add_argument(
-            '-g', '--graph-host',
-            default="localhost",
-            help=f'Graph host (default: localhost)'
-        )
-
-        parser.add_argument(
-            '--graph-username',
-            default=None,
-            help=f'Cassandra username'
-        )
-
-        parser.add_argument(
-            '--graph-password',
-            default=None,
-            help=f'Cassandra password'
-        )
+        add_cassandra_args(parser)
 
 
 def run():
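The thrust of this file's change: the Cassandra triples store moves from one table per (user, collection) pair (TrustGraph) to one keyspace per user (KnowledgeGraph), with the collection passed into every get_* call instead of being baked into the session. Connection caching accordingly keys on the user alone, as restated here:

    # One cached KnowledgeGraph session per user now serves all of that
    # user's collections (summary of the hunks above, not new code).
    if user != self.table:      # was: (query.user, query.collection) != self.table
        self.tg = KnowledgeGraph(hosts=self.cassandra_host, keyspace=user)
        self.table = user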
@@ -55,6 +55,10 @@ class Processor(TriplesQueryService):
 
         try:
 
+            # Extract user and collection, use defaults if not provided
+            user = query.user if query.user else "default"
+            collection = query.collection if query.collection else "default"
+
             triples = []
 
             if query.s is not None:

@@ -64,10 +68,13 @@ class Processor(TriplesQueryService):
                     # SPO
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Literal {value: $value}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                         "RETURN $src as src "
                         "LIMIT " + str(query.limit),
                         src=query.s.value, rel=query.p.value, value=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -75,10 +82,13 @@ class Processor(TriplesQueryService):
                         triples.append((query.s.value, query.p.value, query.o.value))
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Node {uri: $uri}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Node {uri: $uri, user: $user, collection: $collection}) "
                         "RETURN $src as src "
                         "LIMIT " + str(query.limit),
                         src=query.s.value, rel=query.p.value, uri=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -90,10 +100,13 @@ class Processor(TriplesQueryService):
                     # SP
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Literal) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Literal {user: $user, collection: $collection}) "
                         "RETURN dest.value as dest "
                         "LIMIT " + str(query.limit),
                         src=query.s.value, rel=query.p.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -102,10 +115,13 @@ class Processor(TriplesQueryService):
                         triples.append((query.s.value, query.p.value, data["dest"]))
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Node) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Node {user: $user, collection: $collection}) "
                         "RETURN dest.uri as dest "
                         "LIMIT " + str(query.limit),
                         src=query.s.value, rel=query.p.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -120,10 +136,13 @@ class Processor(TriplesQueryService):
                     # SO
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Literal {value: $value}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {user: $user, collection: $collection}]->"
+                        "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                         "RETURN rel.uri as rel "
                         "LIMIT " + str(query.limit),
                         src=query.s.value, value=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -132,10 +151,13 @@ class Processor(TriplesQueryService):
                         triples.append((query.s.value, data["rel"], query.o.value))
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Node {uri: $uri}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {user: $user, collection: $collection}]->"
+                        "(dest:Node {uri: $uri, user: $user, collection: $collection}) "
                         "RETURN rel.uri as rel "
                         "LIMIT " + str(query.limit),
                         src=query.s.value, uri=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -148,10 +170,13 @@ class Processor(TriplesQueryService):
                 # S
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Literal) "
+                    "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Literal {user: $user, collection: $collection}) "
                     "RETURN rel.uri as rel, dest.value as dest "
                     "LIMIT " + str(query.limit),
                     src=query.s.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -160,10 +185,13 @@ class Processor(TriplesQueryService):
                     triples.append((query.s.value, data["rel"], data["dest"]))
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Node) "
+                    "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Node {user: $user, collection: $collection}) "
                     "RETURN rel.uri as rel, dest.uri as dest "
                     "LIMIT " + str(query.limit),
                     src=query.s.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -181,10 +209,13 @@ class Processor(TriplesQueryService):
                     # PO
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Literal {value: $value}) "
+                        "MATCH (src:Node {user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                        "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                         "RETURN src.uri as src "
                         "LIMIT " + str(query.limit),
                         uri=query.p.value, value=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -193,10 +224,13 @@ class Processor(TriplesQueryService):
                         triples.append((data["src"], query.p.value, query.o.value))
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Node {uri: $dest}) "
+                        "MATCH (src:Node {user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                        "(dest:Node {uri: $dest, user: $user, collection: $collection}) "
                         "RETURN src.uri as src "
                         "LIMIT " + str(query.limit),
                         uri=query.p.value, dest=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -209,10 +243,13 @@ class Processor(TriplesQueryService):
                 # P
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Literal) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                    "(dest:Literal {user: $user, collection: $collection}) "
                     "RETURN src.uri as src, dest.value as dest "
                     "LIMIT " + str(query.limit),
                     uri=query.p.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -221,10 +258,13 @@ class Processor(TriplesQueryService):
                    triples.append((data["src"], query.p.value, data["dest"]))
 
                records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Node) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                    "(dest:Node {user: $user, collection: $collection}) "
                    "RETURN src.uri as src, dest.uri as dest "
                    "LIMIT " + str(query.limit),
                    uri=query.p.value,
+                    user=user, collection=collection,
                    database_=self.db,
                )

@@ -239,10 +279,13 @@ class Processor(TriplesQueryService):
                 # O
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel]->(dest:Literal {value: $value}) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                     "RETURN src.uri as src, rel.uri as rel "
                     "LIMIT " + str(query.limit),
                     value=query.o.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -251,10 +294,13 @@ class Processor(TriplesQueryService):
                    triples.append((data["src"], data["rel"], query.o.value))
 
                records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel]->(dest:Node {uri: $uri}) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Node {uri: $uri, user: $user, collection: $collection}) "
                    "RETURN src.uri as src, rel.uri as rel "
                    "LIMIT " + str(query.limit),
                    uri=query.o.value,
+                    user=user, collection=collection,
                    database_=self.db,
                )

@@ -267,9 +313,12 @@ class Processor(TriplesQueryService):
             # *
 
             records, summary, keys = self.io.execute_query(
-                "MATCH (src:Node)-[rel:Rel]->(dest:Literal) "
+                "MATCH (src:Node {user: $user, collection: $collection})-"
+                "[rel:Rel {user: $user, collection: $collection}]->"
+                "(dest:Literal {user: $user, collection: $collection}) "
                 "RETURN src.uri as src, rel.uri as rel, dest.value as dest "
                 "LIMIT " + str(query.limit),
+                user=user, collection=collection,
                 database_=self.db,
             )

@@ -278,9 +327,12 @@ class Processor(TriplesQueryService):
                 triples.append((data["src"], data["rel"], data["dest"]))
 
             records, summary, keys = self.io.execute_query(
-                "MATCH (src:Node)-[rel:Rel]->(dest:Node) "
+                "MATCH (src:Node {user: $user, collection: $collection})-"
+                "[rel:Rel {user: $user, collection: $collection}]->"
+                "(dest:Node {user: $user, collection: $collection}) "
                 "RETURN src.uri as src, rel.uri as rel, dest.uri as dest "
                 "LIMIT " + str(query.limit),
+                user=user, collection=collection,
                 database_=self.db,
             )
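Both graph-store variants of this service get the same treatment: every node and relationship pattern in the Cypher gains user and collection properties, so a single graph database can hold many tenants' triples side by side. The recurring shape, shown for the SP case (illustrative; the parameters are bound at execute time):

    cypher = (
        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
        "(dest:Literal {user: $user, collection: $collection}) "
        "RETURN dest.value as dest "
        "LIMIT 20"
    )
    # bound parameters: src, rel, user, collection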
@@ -55,6 +55,10 @@ class Processor(TriplesQueryService):
 
         try:
 
+            # Extract user and collection, use defaults if not provided
+            user = query.user if query.user else "default"
+            collection = query.collection if query.collection else "default"
+
             triples = []
 
             if query.s is not None:

@@ -64,9 +68,12 @@ class Processor(TriplesQueryService):
                     # SPO
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Literal {value: $value}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                         "RETURN $src as src",
                         src=query.s.value, rel=query.p.value, value=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -74,9 +81,12 @@ class Processor(TriplesQueryService):
                         triples.append((query.s.value, query.p.value, query.o.value))
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Node {uri: $uri}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Node {uri: $uri, user: $user, collection: $collection}) "
                         "RETURN $src as src",
                         src=query.s.value, rel=query.p.value, uri=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -88,9 +98,12 @@ class Processor(TriplesQueryService):
                     # SP
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Literal) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Literal {user: $user, collection: $collection}) "
                         "RETURN dest.value as dest",
                         src=query.s.value, rel=query.p.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -99,9 +112,12 @@ class Processor(TriplesQueryService):
                        triples.append((query.s.value, query.p.value, data["dest"]))
 
                    records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel {uri: $rel}]->(dest:Node) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $rel, user: $user, collection: $collection}]->"
+                        "(dest:Node {user: $user, collection: $collection}) "
                        "RETURN dest.uri as dest",
                        src=query.s.value, rel=query.p.value,
+                        user=user, collection=collection,
                        database_=self.db,
                    )

@@ -116,9 +132,12 @@ class Processor(TriplesQueryService):
                     # SO
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Literal {value: $value}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {user: $user, collection: $collection}]->"
+                        "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                         "RETURN rel.uri as rel",
                         src=query.s.value, value=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -127,9 +146,12 @@ class Processor(TriplesQueryService):
                        triples.append((query.s.value, data["rel"], query.o.value))
 
                    records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Node {uri: $uri}) "
+                        "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                        "[rel:Rel {user: $user, collection: $collection}]->"
+                        "(dest:Node {uri: $uri, user: $user, collection: $collection}) "
                        "RETURN rel.uri as rel",
                        src=query.s.value, uri=query.o.value,
+                        user=user, collection=collection,
                        database_=self.db,
                    )

@@ -142,9 +164,12 @@ class Processor(TriplesQueryService):
                 # S
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Literal) "
+                    "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Literal {user: $user, collection: $collection}) "
                     "RETURN rel.uri as rel, dest.value as dest",
                     src=query.s.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -153,9 +178,12 @@ class Processor(TriplesQueryService):
                    triples.append((query.s.value, data["rel"], data["dest"]))
 
                records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node {uri: $src})-[rel:Rel]->(dest:Node) "
+                    "MATCH (src:Node {uri: $src, user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Node {user: $user, collection: $collection}) "
                    "RETURN rel.uri as rel, dest.uri as dest",
                    src=query.s.value,
+                    user=user, collection=collection,
                    database_=self.db,
                )

@@ -173,9 +201,12 @@ class Processor(TriplesQueryService):
                     # PO
 
                     records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Literal {value: $value}) "
+                        "MATCH (src:Node {user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                        "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                         "RETURN src.uri as src",
                         uri=query.p.value, value=query.o.value,
+                        user=user, collection=collection,
                         database_=self.db,
                     )

@@ -184,9 +215,12 @@ class Processor(TriplesQueryService):
                        triples.append((data["src"], query.p.value, query.o.value))
 
                    records, summary, keys = self.io.execute_query(
-                        "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Node {uri: $dest}) "
+                        "MATCH (src:Node {user: $user, collection: $collection})-"
+                        "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                        "(dest:Node {uri: $dest, user: $user, collection: $collection}) "
                        "RETURN src.uri as src",
                        uri=query.p.value, dest=query.o.value,
+                        user=user, collection=collection,
                        database_=self.db,
                    )

@@ -199,9 +233,12 @@ class Processor(TriplesQueryService):
                 # P
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Literal) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                    "(dest:Literal {user: $user, collection: $collection}) "
                     "RETURN src.uri as src, dest.value as dest",
                     uri=query.p.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -210,9 +247,12 @@ class Processor(TriplesQueryService):
                    triples.append((data["src"], query.p.value, data["dest"]))
 
                records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel {uri: $uri}]->(dest:Node) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {uri: $uri, user: $user, collection: $collection}]->"
+                    "(dest:Node {user: $user, collection: $collection}) "
                    "RETURN src.uri as src, dest.uri as dest",
                    uri=query.p.value,
+                    user=user, collection=collection,
                    database_=self.db,
                )

@@ -227,9 +267,12 @@ class Processor(TriplesQueryService):
                 # O
 
                 records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel]->(dest:Literal {value: $value}) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Literal {value: $value, user: $user, collection: $collection}) "
                     "RETURN src.uri as src, rel.uri as rel",
                     value=query.o.value,
+                    user=user, collection=collection,
                     database_=self.db,
                 )

@@ -238,9 +281,12 @@ class Processor(TriplesQueryService):
                    triples.append((data["src"], data["rel"], query.o.value))
 
                records, summary, keys = self.io.execute_query(
-                    "MATCH (src:Node)-[rel:Rel]->(dest:Node {uri: $uri}) "
+                    "MATCH (src:Node {user: $user, collection: $collection})-"
+                    "[rel:Rel {user: $user, collection: $collection}]->"
+                    "(dest:Node {uri: $uri, user: $user, collection: $collection}) "
                    "RETURN src.uri as src, rel.uri as rel",
                    uri=query.o.value,
+                    user=user, collection=collection,
                    database_=self.db,
                )

@@ -253,8 +299,11 @@ class Processor(TriplesQueryService):
             # *
 
             records, summary, keys = self.io.execute_query(
-                "MATCH (src:Node)-[rel:Rel]->(dest:Literal) "
+                "MATCH (src:Node {user: $user, collection: $collection})-"
+                "[rel:Rel {user: $user, collection: $collection}]->"
+                "(dest:Literal {user: $user, collection: $collection}) "
                 "RETURN src.uri as src, rel.uri as rel, dest.value as dest",
+                user=user, collection=collection,
                 database_=self.db,
             )

@@ -263,8 +312,11 @@ class Processor(TriplesQueryService):
                 triples.append((data["src"], data["rel"], data["dest"]))
 
             records, summary, keys = self.io.execute_query(
-                "MATCH (src:Node)-[rel:Rel]->(dest:Node) "
+                "MATCH (src:Node {user: $user, collection: $collection})-"
+                "[rel:Rel {user: $user, collection: $collection}]->"
+                "(dest:Node {user: $user, collection: $collection}) "
                 "RETURN src.uri as src, rel.uri as rel, dest.uri as dest",
+                user=user, collection=collection,
                 database_=self.db,
            )