<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
        xmlns:image="http://www.google.com/schemas/sitemap-image/1.1">
  <!-- Blog Posts.
       NOTE(review): Google has deprecated the <image:title> and <image:caption>
       tags in image sitemaps (ignored by Google Search since 2022); only
       <image:loc> is read. The extra tags below are harmless but carry no SEO
       value — confirm against current Google image-sitemap documentation
       before relying on them. -->
  <url>
    <loc>https://bytearmor.ai/blog/securing-llm-code-complete-guide</loc>
    <lastmod>2025-01-20T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/llm-code-security-guide.jpg</image:loc>
      <image:title>Securing LLM-Generated Code</image:title>
      <image:caption>Complete guide to securing AI-generated code from vulnerabilities</image:caption>
    </image:image>
  </url>
  
  <url>
    <loc>https://bytearmor.ai/blog/owasp-llm-code-generation</loc>
    <lastmod>2025-01-22T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/owasp-llm-security.jpg</image:loc>
      <image:title>OWASP Top 10 for LLMs</image:title>
      <image:caption>Understanding OWASP security risks in LLM code generation</image:caption>
    </image:image>
  </url>
  
  <url>
    <loc>https://bytearmor.ai/blog/prompt-injection-data-poisoning</loc>
    <lastmod>2025-01-23T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/prompt-injection-defense.jpg</image:loc>
      <image:title>Prompt Injection and Data Poisoning</image:title>
      <image:caption>Defense strategies against prompt injection attacks</image:caption>
    </image:image>
  </url>
  
  <url>
    <loc>https://bytearmor.ai/blog/prompt-engineering-secure-code</loc>
    <lastmod>2025-01-24T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/prompt-engineering-security.jpg</image:loc>
      <image:title>Secure Prompt Engineering</image:title>
      <image:caption>Engineering prompts for secure code generation</image:caption>
    </image:image>
  </url>
  
  <url>
    <loc>https://bytearmor.ai/blog/devsecops-ai-adaptation</loc>
    <lastmod>2025-01-25T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/devsecops-ai-pipeline.jpg</image:loc>
      <image:title>DevSecOps for AI</image:title>
      <image:caption>Adapting DevSecOps practices for AI-generated code</image:caption>
    </image:image>
  </url>
  
  <url>
    <loc>https://bytearmor.ai/blog/ai-vulnerabilities-detection</loc>
    <lastmod>2025-01-26T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/vulnerability-detection.jpg</image:loc>
      <image:title>AI Vulnerability Detection</image:title>
      <image:caption>Detecting and fixing vulnerabilities in AI-generated code</image:caption>
    </image:image>
  </url>
  
  <url>
    <loc>https://bytearmor.ai/blog/ai-red-teaming-verification</loc>
    <lastmod>2025-01-27T00:00:00+00:00</lastmod>
    <changefreq>monthly</changefreq>
    <priority>0.8</priority>
    <image:image>
      <image:loc>https://bytearmor.ai/images/blog/red-teaming-verification.jpg</image:loc>
      <image:title>AI Red Teaming</image:title>
      <image:caption>Red teaming and verification techniques for AI systems</image:caption>
    </image:image>
  </url>
</urlset>