<!-- LLM.txt viewer for Framer (Roasy AI) -->
<!-- Place this entire snippet inside an Embed block on a page like /llm -->

<!-- Empty raw-text container; a script fills it via textContent so the
     llm.txt payload renders exactly as typed, with soft wrapping. -->
<pre id="llmtxt"
  style="white-space: pre-wrap; word-break: break-word; font-family: monospace; padding: 16px; font-size: 14px; margin: 0;">
</pre>

<script>
/*
  Injects the llm.txt content into the #llmtxt <pre> as raw text.

  - textContent (instead of innerHTML) guarantees the payload is displayed
    exactly as plain text: no markup interpretation, no "Collapse" or other
    extra UI elements.
  - Wrapped in an IIFE so no identifiers leak into the page's global scope
    (Framer embeds share the page context).
  - Guarded with a null check: if the <pre id="llmtxt"> is renamed or removed
    in the editor, the script exits quietly instead of throwing a TypeError.
*/
(function () {
  var target = document.getElementById('llmtxt');
  if (!target) return; // container missing — fail silently rather than crash

  target.textContent = `# llm.txt — Guidance for AI crawlers and LLM providers
# Site: https://roasy.ai/
# Owner: Roasy AI (Maslaking Yazılım A.Ş.)
# Encoding: UTF-8
# Last-Updated: 2025-10-07
# Language: en (human notes below include tr)

version: 1.0

metadata:
  organization: Roasy AI
  domain: roasy.ai
  contact_email: [email protected]

# ---- Global Policy ----
user-agent: *
purpose:
  qa_browsing: allow
  search_indexing: allow
  model_training: disallow
  dataset_inclusion: disallow
requirements:
  attribution_required: true
  robots_txt_respected: true
  cache_max_age_days: 7
  rate_limit_rps: 1
disallow_paths:
  - /privacy
  - /terms
  - /legal
  - /admin
  - /api
  - /account
  - /billing
  - /checkout
  - /dashboard
allow_paths:
  - /
  - /blog
  - /product
  - /pricing
  - /about
contact: https://roasy.ai/#contact
sitemap: https://roasy.ai/sitemap.xml

# ---- Specific Agents ----
user-agent: GPTBot
purpose:
  qa_browsing: allow
  search_indexing: allow
  model_training: disallow
  dataset_inclusion: disallow
requirements:
  attribution_required: true
  cache_max_age_days: 7
  rate_limit_rps: 1

user-agent: Google-Extended
purpose:
  model_training: disallow
  dataset_inclusion: disallow

user-agent: CCBot
purpose:
  model_training: disallow
  dataset_inclusion: disallow
  search_indexing: allow

user-agent: PerplexityBot
purpose:
  qa_browsing: allow
  search_indexing: allow
  model_training: disallow
requirements:
  attribution_required: true
  rate_limit_rps: 1

user-agent: Amazonbot
purpose:
  model_training: disallow
  dataset_inclusion: disallow

# ---- Human Note (tr) ----
# This file allows limited crawling for Q&A and search purposes only.
# Model training and dataset inclusion are disallowed.
# Crawlers must respect robots.txt and provide attribution when referencing the site.
`;
})();
</script>
<!--
  Removed: a second static <pre> that duplicated, byte for byte, the llm.txt
  content the script above injects into #llmtxt — with both present the policy
  text rendered twice on the page. Also removed a stray trailing "Collapse"
  text node, a leftover artifact from copying the snippet out of a collapsible
  code-viewer UI.
-->