Spaces: Runtime error

talexm committed · Commit ae2a953
Parent(s): f702b43

update
devops_ai/__init__.py
ADDED
File without changes
devops_ai/deployment_script.py
ADDED
@@ -0,0 +1,60 @@
import os
import json
from openai import OpenAI


def generate_helm_chart():
    client = OpenAI(
        base_url="https://api.studio.nebius.ai/v1/",
        api_key="N_KEY",  # placeholder key; the real key is not part of this commit
    )
    prompt = """
    pipeline {
        agent any
        stages {
            stage('Build') {
                steps {
                    echo 'Building...'
                    sh './gradlew build'
                }
            }
            stage('Test') {
                steps {
                    echo 'Testing...'
                    sh './gradlew test'
                }
            }
            stage('Deploy') {
                steps {
                    echo 'Deploying...'
                    sh './deploy.sh'
                }
            }
        }
    }
    """
    response = client.chat.completions.create(
        model="Qwen/Qwen2-VL-72B-Instruct",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text",
                     "text": f"Convert this Jenkinsfile into a GitHub Actions workflow:\n\n{prompt}"}
                ],
            }
        ],
        max_tokens=300,
    )

    # Extracting and prettifying the content
    if response.choices:
        helm_chart_raw = response.choices[0].message.content

        # Prettifying the output
        print("\n--- Helm Chart Output (Prettified) ---\n")
        print(helm_chart_raw.strip())
    else:
        print("No response received from the API.")


if __name__ == "__main__":
    generate_helm_chart()
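Note: the hard-coded `api_key="N_KEY"` above is a placeholder and may be one reason the Space reports a runtime error. A minimal sketch of reading the key from an environment variable instead; the variable name `NEBIUS_API_KEY` is an assumption, not something defined in this commit:

```python
import os
from openai import OpenAI

def make_client() -> OpenAI:
    # NEBIUS_API_KEY is an assumed variable name; set it as a secret in the Space.
    api_key = os.environ.get("NEBIUS_API_KEY")
    if not api_key:
        raise RuntimeError("NEBIUS_API_KEY is not set")
    return OpenAI(base_url="https://api.studio.nebius.ai/v1/", api_key=api_key)
```

`generate_helm_chart()` could then call `make_client()` instead of constructing the client with a literal key.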
devops_ai/example-service_helm_chart.yaml
ADDED
@@ -0,0 +1,26 @@
Here's an example of how you can deploy an AWS Lambda function using CloudFormation. This Lambda function will be triggered when an object is created in a specified S3 bucket, and it will convert all uploaded files to PDF format:

1. **Template Preparation:**
   - Create a new directory on your local machine where you will store the template file and Lambda function code.
   - Write a Python code for the Lambda function that converts files to PDF. Here's a simple example using the `python-pdf` library:

```python
import boto3
import pdfkit
s3 = boto3.client('s3')

def convert_to_pdf(event, context):
    # Extract file name and bucket name from the event
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    file_name = event['Records'][0]['s3']['object']['key']

    # Download the file from S3
    response = s3.get_object(Bucket=bucket_name, Key=file_name)
    original_file_data = response['Body'].read()

    # Convert the file to PDF
    pdf_file = pdfkit.from_string(original_file_data)
    pdf_filename = file_name + '.pdf'

    # Upload the PDF back to S3
    upload_response = s3.put_object(
        Body=pdf_file,
        Bucket=
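The file above appears to be the raw model output saved by `helm_chart_generator.py`: it drifts off-prompt (an S3/Lambda walkthrough rather than a Helm chart) and appears to be cut off by the `max_tokens=300` limit. A minimal sketch of a guard that checks the generated text actually parses as YAML before it is saved; it assumes PyYAML is installed and is not part of this commit:

```python
import yaml  # PyYAML, assumed to be available

def looks_like_yaml(text: str) -> bool:
    """Heuristic check that model output parses as structured YAML."""
    try:
        docs = list(yaml.safe_load_all(text))
    except yaml.YAMLError:
        return False
    # Plain prose can still parse as a single YAML string, so require a mapping or list.
    return any(isinstance(doc, (dict, list)) for doc in docs)

# Hypothetical usage:
# if not looks_like_yaml(helm_chart):
#     raise ValueError("Output does not look like a Helm chart; adjust the prompt or retry.")
```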
devops_ai/helm_chart_generator.py
ADDED
@@ -0,0 +1,45 @@
import cohere

# Initialize Cohere client
co = cohere.Client("nvnj")  # placeholder API key; the real key is not part of this commit

def generate_helm_chart(service_name):
    prompt = f"Generate a Kubernetes Helm chart for a microservice named '{service_name}' in YAML format."
    response = co.generate(
        model="command",
        prompt=prompt,
        max_tokens=300,
        temperature=0.7,
    )
    return response.generations[0].text.strip()

# Example Usage
service_name = "example-service"
helm_chart = generate_helm_chart(service_name)
print(helm_chart)


def save_exact_output_to_file(output, file_name):
    try:
        # Save the exact string output to the file
        with open(file_name, "w") as file:
            file.write(output)
        print(f"Output saved successfully to {file_name}")
    except Exception as e:
        print(f"Error saving output: {e}")


# Example Usage
if __name__ == "__main__":
    service_name = "example-service"
    try:
        # Generate the Helm chart
        helm_chart = generate_helm_chart(service_name)

        # Save the output to a YAML file
        file_name = f"{service_name}_helm_chart.yaml"
        save_exact_output_to_file(helm_chart, file_name)
    except Exception as e:
        print(f"Error: {e}")
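Even when the model does return a chart, it often wraps the YAML in Markdown code fences, which `save_exact_output_to_file` would write verbatim into the `.yaml` file. A small sketch of stripping such fences before saving; this helper is illustrative and not part of the commit:

```python
def strip_code_fences(text: str) -> str:
    """Remove a leading ``` / ```yaml line and a trailing ``` line, if present."""
    lines = text.strip().splitlines()
    if lines and lines[0].startswith("```"):
        lines = lines[1:]
    if lines and lines[-1].strip() == "```":
        lines = lines[:-1]
    return "\n".join(lines).strip()

# Hypothetical usage:
# save_exact_output_to_file(strip_code_fences(helm_chart), file_name)
```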