Protect Your Community with AI-Powered Content Moderation

ContentShield AI automatically moderates user-generated content on your website with advanced AI analysis, customizable filters, and real-time protection against harmful content.

  • 99.9% Accuracy Rate
  • <100ms Response Time
  • 24/7 Protection

Dashboard preview: 847 Positive Comments, 23 Blocked Content, 156 Neutral Reviews

Powerful Features for Complete Protection

Everything you need to keep your community safe and engaged

🤖 AI-Powered Analysis

Advanced machine learning algorithms analyze content context, sentiment, and potential risks in real-time.
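
For instance, a single moderation call returns a structured verdict using the same fields as the ModerationResponse and ModerationResult types in the integration examples below; the values shown here are illustrative only.

// Example JSON response from the /moderate endpoint (illustrative values)
{
  "context": "product feedback",
  "score": 92,
  "passed": true,
  "description": "No harmful content detected"
}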

⚙️ Customizable Filters

Set your own scoring thresholds, forbidden topics, and blocked words to match your community standards.
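
As a rough sketch, a project's filters might be described by a configuration along these lines; the field names below are hypothetical illustrations, not the actual ContentShield AI settings schema.

// Illustrative project settings (hypothetical field names, not the real schema)
const projectSettings = {
  scoreThreshold: 70,                        // example scoring threshold for your project
  forbiddenTopics: ['politics', 'gambling'], // topics you never want in comments
  blockedWords: ['spamword1', 'spamword2']   // custom blocked-word list
};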

📊 Real-time Dashboard

Monitor all your projects with detailed analytics, trends, and actionable insights at a glance.

🔌 Easy Integration

Simple API integration that works as middleware for comments, reviews, and any user-generated content.
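
As a minimal sketch of that middleware pattern, assuming an Express application and reusing the moderateContent helper from the JavaScript example further down, a comment route could gate submissions like this (route names and status codes are illustrative):

// Express middleware sketch (assumes the moderateContent helper from the
// JavaScript integration example below, plus a hypothetical /comments route)
const express = require('express');
const app = express();
app.use(express.json());

const moderationMiddleware = async (req, res, next) => {
  const result = await moderateContent(req.body.comment, 'your-project-id');
  if (result && result.passed) {
    return next(); // approved: hand off to the route handler
  }
  res.status(422).json({ error: 'Content blocked', reason: result && result.description });
};

app.post('/comments', moderationMiddleware, (req, res) => {
  // At this point the comment has passed moderation and is safe to store and publish
  res.status(201).json({ status: 'published' });
});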

⚡ Lightning Fast

Sub-100ms response times ensure your users never experience delays while staying protected.

🛡️ Multi-layer Protection

Combines sentiment analysis, topic detection, and custom rules for comprehensive content filtering.

How ContentShield AI Works

Get started in minutes with our simple 4-step process

1. Register & Create Project

Sign up for your account and create your first project in our intuitive dashboard.

2. Configure Your Settings

Set your scoring thresholds, define forbidden topics, and customize your content filters.

3. Get Your API Key

Receive your unique API key and endpoint URL to integrate with your website.

4. Start Protecting

Implement our middleware and watch as AI protects your community in real-time.

API Integration Examples

// JavaScript (Node.js/Browser)
const moderateContent = async (content, projectId) => {
  try {
    const response = await fetch('https://api.contentshieldai.com/moderate', {
      method: 'POST',
      headers: {
        'Authorization': 'Bearer YOUR_API_KEY',
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        content: content,
        projectId: projectId
      })
    });

    const result = await response.json();

    if (result.passed) {
      // Content is safe to publish
      publishContent(content);
    } else {
      // Content blocked
      console.log('Content blocked:', result.description);
    }

    return result;
  } catch (error) {
    console.error('Moderation failed:', error);
  }
};

// Usage example
moderateContent("This is a great product!", "your-project-id");
// PHP Example
<?php
class ContentShieldAI {
    private $apiKey;
    private $baseUrl = 'https://api.contentshieldai.com';

    public function __construct($apiKey) {
        $this->apiKey = $apiKey;
    }

    public function moderateContent($content, $projectId) {
        $data = [
            'content' => $content,
            'projectId' => $projectId
        ];

        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, $this->baseUrl . '/moderate');
        curl_setopt($ch, CURLOPT_POST, true);
        curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($data));
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        curl_setopt($ch, CURLOPT_HTTPHEADER, [
            'Authorization: Bearer ' . $this->apiKey,
            'Content-Type: application/json'
        ]);

        $response = curl_exec($ch);
        curl_close($ch);

        return json_decode($response, true);
    }
}

// Usage
$moderator = new ContentShieldAI('YOUR_API_KEY');
$result = $moderator->moderateContent('User comment here', 'project-id');

if ($result['passed']) {
    // Safe to publish
    echo "Content approved";
} else {
    echo "Content blocked: " . $result['description'];
}
?>
// Go Example
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
)

type ModerationRequest struct {
    Content   string `json:"content"`
    ProjectID string `json:"projectId"`
}

type ModerationResponse struct {
    Context     string  `json:"context"`
    Score       int     `json:"score"`
    Passed      bool    `json:"passed"`
    Description string  `json:"description"`
}

func moderateContent(content, projectID, apiKey string) (*ModerationResponse, error) {
    req := ModerationRequest{
        Content:   content,
        ProjectID: projectID,
    }

    jsonData, err := json.Marshal(req)
    if err != nil {
        return nil, err
    }

    httpReq, err := http.NewRequest("POST",
        "https://api.contentshieldai.com/moderate",
        bytes.NewBuffer(jsonData))
    if err != nil {
        return nil, err
    }

    httpReq.Header.Set("Authorization", "Bearer "+apiKey)
    httpReq.Header.Set("Content-Type", "application/json")

    client := &http.Client{}
    resp, err := client.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }

    var result ModerationResponse
    err = json.Unmarshal(body, &result)
    return &result, err
}

func main() {
    result, err := moderateContent(
        "This is a test comment",
        "your-project-id",
        "YOUR_API_KEY")

    if err != nil {
        fmt.Printf("Error: %v\n", err)
        return
    }

    if result.Passed {
        fmt.Println("Content approved for publication")
    } else {
        fmt.Printf("Content blocked: %s\n", result.Description)
    }
}
// .NET C# Example
using System;
using System.Net.Http;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;

public class ContentShieldAI
{
    private readonly HttpClient _httpClient;
    private readonly string _apiKey;
    private const string BaseUrl = "https://api.contentshieldai.com";

    public ContentShieldAI(string apiKey)
    {
        _apiKey = apiKey;
        _httpClient = new HttpClient();
        _httpClient.DefaultRequestHeaders.Add("Authorization", $"Bearer {apiKey}");
    }

    public async Task<ModerationResult> ModerateContentAsync(string content, string projectId)
    {
        var request = new
        {
            content = content,
            projectId = projectId
        };

        var json = JsonSerializer.Serialize(request);
        var httpContent = new StringContent(json, Encoding.UTF8, "application/json");

        try
        {
            var response = await _httpClient.PostAsync($"{BaseUrl}/moderate", httpContent);
            var responseString = await response.Content.ReadAsStringAsync();

            return JsonSerializer.Deserialize<ModerationResult>(responseString,
                new JsonSerializerOptions { PropertyNameCaseInsensitive = true });
        }
        catch (Exception ex)
        {
            throw new Exception($"Moderation failed: {ex.Message}");
        }
    }
}

public class ModerationResult
{
    public string Context { get; set; }
    public int Score { get; set; }
    public bool Passed { get; set; }
    public string Description { get; set; }
}

// Usage example
class Program
{
    static async Task Main(string[] args)
    {
        var moderator = new ContentShieldAI("YOUR_API_KEY");

        var result = await moderator.ModerateContentAsync(
            "This is a user comment",
            "your-project-id");

        if (result.Passed)
        {
            Console.WriteLine("Content approved for publication");
        }
        else
        {
            Console.WriteLine($"Content blocked: {result.Description}");
        }
    }
}

Simple, Transparent Pricing

Choose the perfect plan for your needs

Starter

$29/month
  • ✓ Up to 10,000 requests/month
  • ✓ 1 project
  • ✓ Basic dashboard
  • ✓ Email support
  • ✓ Standard API access
Get Started

Enterprise

Custom
  • ✓ Unlimited requests
  • ✓ Unlimited projects
  • ✓ White-label solution
  • ✓ Dedicated support
  • ✓ Custom AI training
  • ✓ SLA guarantee
Contact Sales

Ready to Get Started?

Join thousands of websites already protected by ContentShield AI. Start your free trial today and see the difference intelligent content moderation can make.

🚀 Quick Setup

Get started in under 5 minutes

Start Your Free Trial