šŸ“š Code Examples

Ready-to-Use API Implementations

Copy-paste code examples in PHP, Node.js, and Python. Includes AI integrations with OpenAI and Claude.

🐘 PHP Implementation

Simple PHP server with keyword-based replies

PHP
<?php
// server-api.php
// Keyword-based auto-reply endpoint: authenticates the caller via the
// X-API-Key header, parses the incoming JSON message payload, and returns
// a canned reply chosen by simple keyword matching.
header('Content-Type: application/json');

// 1. Verify X-API-Key.
// hash_equals() performs a constant-time comparison, avoiding the timing
// side-channel that a plain !== string comparison would leak.
$apiKey = $_SERVER['HTTP_X_API_KEY'] ?? '';
if (!hash_equals('your-secret-key-here', $apiKey)) {
    http_response_code(403);
    echo json_encode([
        'error' => 'Invalid API key',
        'status' => 'forbidden'
    ]);
    exit;
}

// 2. Parse incoming JSON.
// json_decode() returns null on malformed input; reject that explicitly
// instead of silently treating it as an empty message.
$data = json_decode(file_get_contents('php://input'), true);
if (!is_array($data)) {
    http_response_code(400);
    echo json_encode([
        'error' => 'Invalid JSON body',
        'status' => 'bad_request'
    ]);
    exit;
}

$type = $data['type'] ?? '';
$sender = $data['sender'] ?? '';
$name = $data['name'] ?? '';
$message = strtolower($data['message'] ?? '');  // lowercase once so keyword checks are case-insensitive
$timestamp = $data['timestamp'] ?? '';

// 3. Log incoming message (optional)
error_log("Received from $sender: $message");

// 4. Generate smart reply based on message keywords — first matching arm wins.
$reply = match(true) {
    str_contains($message, 'help') => 
        "šŸ“± How can we help? Reply:\n• INFO - General info\n• STATUS - Check status\n• CONTACT - Contact details",
    
    str_contains($message, 'info') => 
        "ā„¹ļø Welcome! Visit our website at www.example.com or email support@example.com",
    
    str_contains($message, 'status') => 
        "āœ… System is running normally. All services operational.",
    
    str_contains($message, 'contact') => 
        "šŸ“§ Contact Us:\nEmail: support@example.com\nPhone: +40 XXX XXX XXX\nWebsite: www.example.com",
    
    str_contains($message, 'price') || str_contains($message, 'cost') =>
        "šŸ’° For pricing information, please visit our website or call us during business hours (9 AM - 5 PM).",
    
    str_contains($message, 'hours') || str_contains($message, 'open') =>
        "šŸ• Business Hours:\nMon-Fri: 9:00 AM - 5:00 PM\nSat: 10:00 AM - 2:00 PM\nSun: Closed",
    
    default => 
        "Thanks for your message! We'll get back to you soon. For immediate help, send HELP."
};

// 5. Send response
echo json_encode([
    'text' => $reply,
    'status' => 'success',
    'timestamp' => date('Y-m-d H:i:s')
]);
?>

šŸ“ Setup Instructions:

  1. Upload this file to your PHP web server
  2. Replace your-secret-key-here with your actual API key
  3. Customize the reply messages for your use case
  4. Use URL: https://yourdomain.com/server-api.php

šŸ¤– Node.js + OpenAI Integration

AI-powered intelligent replies using GPT-4

JavaScript
// server.js - AI-powered replies with OpenAI
const express = require('express');
const OpenAI = require('openai');

const app = express();
app.use(express.json());

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});

const API_KEY = process.env.API_KEY || 'your-secret-key-here';

/**
 * POST /api/autoreply
 * Authenticates via the x-api-key header, forwards the customer's message
 * to GPT-4, and returns the generated reply. If the OpenAI call fails, a
 * static fallback reply is returned so the sender still gets a response.
 */
app.post('/api/autoreply', async (req, res) => {
  // 1. Verify API Key
  if (req.headers['x-api-key'] !== API_KEY) {
    return res.status(403).json({
      error: 'Invalid API key',
      status: 'forbidden'
    });
  }

  // 2. Get message data.
  // req.body is undefined when the request has no JSON body, so default it
  // before destructuring to avoid a TypeError.
  const { type, sender, name, message, timestamp } = req.body ?? {};

  // Reject requests without a usable message — otherwise the literal string
  // "undefined" would be interpolated into the prompt and sent to the model.
  if (typeof message !== 'string' || message.trim() === '') {
    return res.status(400).json({
      error: 'Missing "message" field',
      status: 'bad_request'
    });
  }

  console.log(`Received from ${sender}: ${message}`);

  try {
    // 3. Generate AI response
    const completion = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [
        {
          role: "system",
          content: "You are a helpful customer support assistant. Keep responses brief (under 150 characters), friendly, and professional."
        },
        {
          role: "user",
          content: `Customer message: ${message}`
        }
      ],
      max_tokens: 150,
      temperature: 0.7
    });

    const aiReply = completion.choices[0].message.content;

    console.log(`AI Reply: ${aiReply}`);

    // 4. Send response
    res.json({
      text: aiReply,
      status: 'success',
      timestamp: new Date().toISOString()
    });

  } catch (error) {
    console.error('OpenAI error:', error);

    // Fallback response: reply with status 'success' so the messaging
    // integration still delivers something to the customer.
    res.json({
      text: 'Thanks for your message! Our team will respond soon.',
      status: 'success'
    });
  }
});

const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on port ${PORT}`);
});

šŸ“ Setup Instructions:

  1. Install dependencies: npm install express openai
  2. Set environment variables:
    • OPENAI_API_KEY - Your OpenAI API key
    • API_KEY - Your custom API key for AutoReply Mate
  3. Run: node server.js
  4. Deploy to Heroku, Vercel, or any Node.js hosting

šŸ Python + Claude AI Integration

Intelligent replies using Anthropic's Claude

Python
# main.py - Smart replies with Claude AI
#
# FastAPI endpoint that authenticates via the x-api-key header, forwards the
# incoming message to Claude, and returns the generated reply (with a static
# fallback if the API call fails).
from fastapi import FastAPI, Header, HTTPException
from anthropic import Anthropic
from pydantic import BaseModel
from datetime import datetime
from typing import Optional
import os

app = FastAPI()
client = Anthropic(api_key=os.getenv('ANTHROPIC_API_KEY'))

API_KEY = os.getenv('API_KEY', 'your-secret-key-here')

class IncomingMessage(BaseModel):
    # Payload posted by the messaging integration.
    type: str
    sender: str
    # Must be Optional[str]: Pydantic v2 no longer infers Optional from a
    # None default, so a bare `name: str = None` rejects null/missing names.
    name: Optional[str] = None
    message: str
    timestamp: str

@app.post('/api/autoreply')
async def handle_message(
    data: IncomingMessage,
    x_api_key: str = Header(None)
):
    # 1. Verify API key
    if x_api_key != API_KEY:
        raise HTTPException(status_code=403, detail='Invalid API key')
    
    print(f'Received from {data.sender}: {data.message}')
    
    # 2. Generate AI response; fall back to a canned reply on any API error
    # so the sender always receives a response.
    try:
        message = client.messages.create(
            model='claude-3-sonnet-20240229',
            max_tokens=150,
            messages=[{
                'role': 'user',
                'content': f'Respond to this customer message briefly and professionally: {data.message}'
            }]
        )
        
        reply_text = message.content[0].text
        print(f'AI Reply: {reply_text}')
        
    except Exception as e:
        print(f'Error: {e}')
        reply_text = "Thanks for contacting us! We'll respond shortly."
    
    # 3. Return response
    return {
        'text': reply_text,
        'status': 'success',
        'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }

if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host='0.0.0.0', port=8000)

šŸ“ Setup Instructions:

  1. Install dependencies: pip install fastapi anthropic uvicorn
  2. Set environment variables:
    • ANTHROPIC_API_KEY - Your Anthropic API key
    • API_KEY - Your custom API key
  3. Run: python main.py
  4. Deploy to Railway, Render, or any Python hosting

⚔ Node.js Basic Implementation

Simple rule-based replies without AI

JavaScript
// server.js - Basic Node.js implementation
// Rule-based auto-reply server: authenticates via the x-api-key header and
// picks a canned reply by keyword matching — no AI dependency.
const express = require('express');
const app = express();

app.use(express.json());

// Read the key from the environment (consistent with the AI example above);
// the literal is only a development fallback.
const API_KEY = process.env.API_KEY || 'your-secret-key-here';

app.post('/api/autoreply', (req, res) => {
  // 1. Verify API key
  if (req.headers['x-api-key'] !== API_KEY) {
    return res.status(403).json({
      error: 'Invalid API key',
      status: 'forbidden'
    });
  }

  // 2. Get message data.
  // req.body is undefined for non-JSON requests and `message` may be missing;
  // guard both so a bad request can't crash the handler with a TypeError.
  const { type, sender, name, message, timestamp } = req.body ?? {};

  if (typeof message !== 'string') {
    return res.status(400).json({
      error: 'Missing "message" field',
      status: 'bad_request'
    });
  }

  const msg = message.toLowerCase(); // case-insensitive keyword matching

  console.log(`[${type}] From ${sender}: ${message}`);

  // 3. Generate reply based on keywords — first match wins.
  let reply;

  if (msg.includes('help')) {
    reply = 'šŸ“± Need help? Reply with:\n• INFO - Information\n• CONTACT - Our contact details\n• HOURS - Business hours';
  } 
  else if (msg.includes('info') || msg.includes('about')) {
    reply = 'ā„¹ļø We offer professional services. Visit www.example.com or call +1234567890.';
  }
  else if (msg.includes('contact')) {
    reply = 'šŸ“§ Contact:\nEmail: support@example.com\nPhone: +1 (234) 567-890\nWebsite: www.example.com';
  }
  else if (msg.includes('hours') || msg.includes('open')) {
    reply = 'šŸ• Hours:\nMon-Fri: 9 AM - 6 PM\nSat-Sun: Closed';
  }
  else if (msg.includes('price') || msg.includes('cost')) {
    reply = 'šŸ’° For pricing, please visit our website or call during business hours.';
  }
  else {
    reply = `Thanks for your message, ${name || 'there'}! We'll get back to you soon. Send HELP for options.`;
  }

  // 4. Send response
  res.json({
    text: reply,
    status: 'success',
    timestamp: new Date().toISOString()
  });

  console.log(`Reply sent: ${reply}`);
});

// Health check endpoint
app.get('/', (req, res) => {
  res.json({ status: 'ok', message: 'AutoReply API is running' });
});

const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`šŸš€ Server running on port ${PORT}`);
});

šŸ“ Setup Instructions:

  1. Install Express: npm install express
  2. Replace your-secret-key-here with your API key
  3. Run: node server.js
  4. Test locally: http://localhost:3000

šŸš€ Deployment Options

Where to host your API server

šŸ”· Heroku

Easy deployment for Node.js and Python apps

git push heroku main

ā–² Vercel

Serverless functions with auto-scaling

vercel deploy

šŸš‚ Railway

Modern platform with free tier

railway up

ā˜ļø AWS Lambda

Pay per request, highly scalable

serverless deploy

šŸ”µ DigitalOcean

Traditional VPS hosting

$5/month droplet

🌐 Shared Hosting

For PHP implementations

Upload via FTP

šŸŽÆ Ready to Implement?

Copy the code, deploy to your server, and start using intelligent auto-replies today