Code Examples

JewelMusic API Examples

Ready-to-use code examples and tutorials to help you integrate JewelMusic's AI-powered music distribution features into your applications.

Quick Start Examples

Copy and paste these examples to get started with common use cases

Audio Transcription
Transcribe audio files with cultural context awareness in JavaScript/TypeScript
JavaScript/TypeScript Example
1import { JewelMusicAPI } from '@jewelmusic/sdk'
2
3const api = new JewelMusicAPI('your-api-key')
4
5async function transcribeAudio() {
6  const file = new File([audioBuffer], 'song.mp3')
7  
8  const result = await api.transcription.create({
9    audio: file,
10    language: 'auto', // Auto-detect
11    cultural_context: 'georgian', // Cultural awareness
12    format: 'json'
13  })
14
15  console.log('Transcription:', result.transcription.text)
16  console.log('Confidence:', result.transcription.confidence)
17  console.log('Cultural patterns:', result.transcription.cultural_patterns)
18  
19  // Handle cultural-specific notation
20  if (result.transcription.cultural_patterns.includes('polyphonic')) {
21    console.log('Detected Georgian polyphonic singing')
22    // Apply specialized processing
23  }
24}
Music Analysis
Analyze musical structure and extract cultural patterns using Python
Python Example
1import jewelmusic
2from jewelmusic import JewelMusicClient
3import numpy as np
4
5client = JewelMusicClient('your-api-key')
6
7def analyze_music(audio_path):
8    """Analyze music with cultural pattern detection"""
9    with open(audio_path, 'rb') as audio_file:
10        analysis = client.analysis.create(
11            audio=audio_file,
12            analysis_type=['structure', 'cultural', 'stems', 'harmonic'],
13            quality='premium',
14            cultural_regions=['georgian', 'caucasian']
15        )
16
17        # Extract musical structure
18        tempo = analysis.structure.tempo
19        key = analysis.structure.key
20        time_signature = analysis.structure.time_signature
21        
22        # Cultural pattern analysis
23        cultural_style = analysis.cultural.style
24        traditional_elements = analysis.cultural.traditional_elements
25        
26        # Harmonic analysis
27        chord_progressions = analysis.harmonic.progressions
28        modal_characteristics = analysis.harmonic.modal_characteristics
29
30        return {
31            'tempo': tempo,
32            'key': key,
33            'time_signature': time_signature,
34            'cultural_style': cultural_style,
35            'traditional_elements': traditional_elements,
36            'chord_progressions': chord_progressions,
37            'modal_characteristics': modal_characteristics
38        }
39
40# Usage
41result = analyze_music('traditional_song.wav')
42print(f"Tempo: {result['tempo']} BPM")
43print(f"Cultural Style: {result['cultural_style']}")
44print(f"Traditional Elements: {', '.join(result['traditional_elements'])}")
Music Distribution (Go)
Submit music for global distribution using the Go SDK
Go Example
1package main
2
3import (
4    "context"
5    "fmt"
6    "log"
7    "github.com/jewelmusic/go-sdk"
8)
9
10func main() {
11    client := jewelmusic.NewClient("your-api-key")
12
13    // Prepare release metadata with cultural context
14    release := &jewelmusic.ReleaseRequest{
15        Title:          "Traditional Georgian Songs",
16        ArtistName:     "Rustavi Ensemble",
17        ReleaseType:    "album",
18        Genre:          "World Music",
19        SubGenre:       "Georgian Folk",
20        CulturalContext: jewelmusic.CulturalContext{
21            Region:     "Georgian",
22            Style:      "Polyphonic",
23            Language:   "ka-GE",
24            Traditional: true,
25        },
26        Metadata: &jewelmusic.ReleaseMetadata{
27            RecordingLocation: "Tbilisi, Georgia",
28            RecordingYear:     2024,
29            ProducerNotes:     "Authentic Georgian polyphonic singing",
30        },
31        Platforms: []string{
32            "spotify", "apple_music", "amazon", "youtube_music",
33            "deezer", "tidal", "pandora",
34        },
35        RoyaltySplits: []jewelmusic.RoyaltySplit{
36            {RecipientID: "artist_001", Percentage: 70},
37            {RecipientID: "label_001", Percentage: 30},
38        },
39    }
40
41    // Submit for distribution
42    ctx := context.Background()
43    result, err := client.Distribution.Submit(ctx, release)
44    if err != nil {
45        log.Fatal(err)
46    }
47
    48    fmt.Printf("Release ID: %s\n", result.ReleaseID)
    50    fmt.Printf("Status: %s\n", result.Status)
    52    fmt.Printf("Estimated Live Date: %s\n", result.EstimatedLiveDate)
54    
55    // Monitor distribution status
56    status, err := client.Distribution.GetStatus(ctx, result.ReleaseID)
57    if err != nil {
58        log.Fatal(err)
59    }
60    
61    for _, platform := range status.Platforms {
    62        fmt.Printf("%s: %s\n", platform.Name, platform.Status)
64    }
65}
Batch Processing (Ruby)
Process multiple audio files with cultural metadata preservation
Ruby Example
1require 'jewelmusic'
2
3client = JewelMusic::Client.new(api_key: 'your-api-key')
4
5class MusicProcessor
6  def initialize(client)
7    @client = client
8  end
9
10  def batch_process_with_metadata(audio_files)
11    results = []
12    
13    audio_files.each do |file_info|
14      begin
15        # Extract and preserve cultural metadata
16        metadata = extract_cultural_metadata(file_info[:path])
17        
18        # Process audio with transcription and analysis
19        result = @client.process_audio(
20          file_path: file_info[:path],
21          options: {
22            transcribe: true,
23            analyze: true,
24            enhance: true,
25            preserve_metadata: true,
26            cultural_context: metadata[:culture],
27            language_hints: metadata[:languages]
28          }
29        )
30        
31        # Stem separation with cultural awareness
32        stems = @client.separate_stems(
33          audio_id: result.audio_id,
34          preservation_mode: 'cultural',
35          stem_types: ['vocals', 'melodic', 'harmonic', 'rhythmic']
36        )
37        
38        results << {
39          original: file_info,
40          processed: result,
41          stems: stems,
42          metadata: metadata
43        }
44        
45      rescue JewelMusic::APIError => e
46        puts "Error processing #{file_info[:path]}: #{e.message}"
47      end
48    end
49    
50    results
51  end
52  
53  private
54  
55  def extract_cultural_metadata(file_path)
56    # Extract metadata from file tags or naming convention
57    {
58      culture: 'georgian',
59      languages: ['ka', 'en'],
60      traditional: true,
61      region: 'caucasus'
62    }
63  end
64end
65
66# Usage
67processor = MusicProcessor.new(client)
68
69files = [
70  { path: 'song1.wav', artist: 'Artist 1' },
71  { path: 'song2.wav', artist: 'Artist 2' }
72]
73
74results = processor.batch_process_with_metadata(files)
75results.each do |r|
76  puts "Processed: #{r[:original][:path]}"
77  puts "Transcription: #{r[:processed].transcription.text}"
78  puts "Cultural Elements: #{r[:metadata][:culture]}"
79end
Web Integration (PHP)
Integrate JewelMusic API into your PHP web application
PHP Example
1<?php
2require_once 'vendor/autoload.php';
3
4use JewelMusic\Client;
5use JewelMusic\Resources\Transcription;
6use JewelMusic\Resources\Distribution;
7
8class JewelMusicService {
9    private $client;
10    
11    public function __construct($apiKey) {
12        $this->client = new Client($apiKey);
13    }
14    
15    /**
16     * Handle file upload and processing
17     */
18    public function processUpload($uploadedFile) {
19        try {
20            // Validate file
21            if (!$this->validateAudioFile($uploadedFile)) {
22                throw new Exception('Invalid audio file');
23            }
24            
25            // Create transcription with cultural awareness
26            $transcription = $this->client->transcriptions->create([
27                'file' => fopen($uploadedFile['tmp_name'], 'r'),
28                'model' => 'jewelmusic-v2',
29                'response_format' => 'json',
30                'language' => 'auto',
31                'cultural_detection' => true,
32                'timestamp_granularities' => ['word', 'segment']
33            ]);
34            
35            // Analyze for distribution readiness
36            $analysis = $this->client->analysis->create([
37                'audio_id' => $transcription->audio_id,
38                'checks' => [
39                    'quality_check' => true,
40                    'copyright_scan' => true,
41                    'explicit_content' => true,
42                    'cultural_sensitivity' => true
43                ]
44            ]);
45            
46            // Prepare for distribution if checks pass
47            if ($analysis->ready_for_distribution) {
48                $distribution = $this->prepareDistribution(
49                    $transcription,
50                    $analysis
51                );
52            }
53            
54            return [
55                'success' => true,
56                'transcription' => $transcription,
57                'analysis' => $analysis,
58                'distribution' => $distribution ?? null
59            ];
60            
61        } catch (Exception $e) {
62            return [
63                'success' => false,
64                'error' => $e->getMessage()
65            ];
66        }
67    }
68    
69    private function validateAudioFile($file) {
70        $allowedTypes = ['audio/mpeg', 'audio/wav', 'audio/flac'];
71        $maxSize = 500 * 1024 * 1024; // 500MB
72        
73        return in_array($file['type'], $allowedTypes) 
74               && $file['size'] <= $maxSize;
75    }
76    
77    private function prepareDistribution($transcription, $analysis) {
78        return $this->client->distribution->prepare([
79            'audio_id' => $transcription->audio_id,
80            'metadata' => [
81                'title' => $transcription->detected_title ?? 'Untitled',
82                'language' => $transcription->detected_language,
83                'cultural_tags' => $analysis->cultural_elements,
84                'auto_generated' => true
85            ],
86            'platforms' => $this->getTargetPlatforms($analysis),
87            'release_date' => date('Y-m-d', strtotime('+2 weeks'))
88        ]);
89    }
90    
91    private function getTargetPlatforms($analysis) {
92        // Select platforms based on content analysis
93        $platforms = ['spotify', 'apple_music'];
94        
95        if ($analysis->cultural_elements) {
96            $platforms[] = 'youtube_music'; // Better for world music
97        }
98        
99        return $platforms;
100    }
101}
102
103// Usage in your web application
104$service = new JewelMusicService($_ENV['JEWELMUSIC_API_KEY']);
105
106if ($_SERVER['REQUEST_METHOD'] === 'POST' && isset($_FILES['audio'])) {
107    $result = $service->processUpload($_FILES['audio']);
108    
109    header('Content-Type: application/json');
110    echo json_encode($result);
111}
112?>
Enterprise Integration (Java)
Enterprise-grade Java implementation with error handling and retry logic
Java Example
1import com.jewelmusic.sdk.*;
2import com.jewelmusic.sdk.models.*;
3import com.jewelmusic.sdk.exceptions.*;
4import java.util.*;
5import java.nio.file.*;
6import java.util.concurrent.*;
7
8public class JewelMusicService {
9    private final JewelMusicClient client;
10    private final ExecutorService executor;
11    private final RetryPolicy retryPolicy;
12    
13    public JewelMusicService(String apiKey) {
14        this.client = JewelMusicClient.builder()
15            .apiKey(apiKey)
16            .timeout(30, TimeUnit.SECONDS)
17            .maxRetries(3)
18            .build();
19            
20        this.executor = Executors.newFixedThreadPool(10);
21        this.retryPolicy = RetryPolicy.exponentialBackoff();
22    }
23    
24    /**
25     * Process audio with comprehensive error handling
26     */
27    public CompletableFuture<ProcessingResult> processAudioAsync(Path audioPath) {
28        return CompletableFuture.supplyAsync(() -> {
29            try {
30                // Step 1: Upload and validate
31                AudioFile audio = uploadWithValidation(audioPath);
32                
33                // Step 2: Transcribe with cultural awareness
34                TranscriptionResult transcription = transcribeWithRetry(audio);
35                
36                // Step 3: Analyze musical structure
37                AnalysisResult analysis = analyzeMusic(audio);
38                
39                // Step 4: Extract stems if needed
40                StemSeparationResult stems = null;
41                if (shouldSeparateStems(analysis)) {
42                    stems = separateStems(audio);
43                }
44                
45                // Step 5: Enhance audio quality
46                EnhancementResult enhancement = enhanceAudio(audio, analysis);
47                
48                // Step 6: Prepare for distribution
49                DistributionPackage distribution = prepareDistribution(
50                    audio, transcription, analysis, enhancement
51                );
52                
53                return ProcessingResult.builder()
54                    .audioId(audio.getId())
55                    .transcription(transcription)
56                    .analysis(analysis)
57                    .stems(stems)
58                    .enhancement(enhancement)
59                    .distribution(distribution)
60                    .build();
61                    
62            } catch (JewelMusicException e) {
63                throw new ProcessingException("Failed to process audio", e);
64            }
65        }, executor);
66    }
67    
68    private TranscriptionResult transcribeWithRetry(AudioFile audio) {
69        return retryPolicy.execute(() -> {
70            TranscriptionRequest request = TranscriptionRequest.builder()
71                .audioId(audio.getId())
72                .model("jewelmusic-v2-large")
73                .language("auto")
74                .enableCulturalDetection(true)
75                .enableTimestamps(true)
76                .vocabularyBoost(Arrays.asList(
77                    "Georgian", "polyphonic", "traditional"
78                ))
79                .build();
80                
81            return client.transcriptions().create(request);
82        });
83    }
84    
85    private AnalysisResult analyzeMusic(AudioFile audio) {
86        AnalysisRequest request = AnalysisRequest.builder()
87            .audioId(audio.getId())
88            .analysisTypes(Arrays.asList(
89                AnalysisType.STRUCTURE,
90                AnalysisType.HARMONY,
91                AnalysisType.RHYTHM,
92                AnalysisType.CULTURAL,
93                AnalysisType.MOOD
94            ))
95            .culturalRegions(Arrays.asList("georgian", "caucasian"))
96            .build();
97            
98        return client.analysis().perform(request);
99    }
100    
101    private boolean shouldSeparateStems(AnalysisResult analysis) {
102        // Decide based on analysis results
103        return analysis.getComplexity() > 0.7 || 
104               analysis.getInstrumentCount() > 3;
105    }
106    
107    private DistributionPackage prepareDistribution(
108            AudioFile audio,
109            TranscriptionResult transcription,
110            AnalysisResult analysis,
111            EnhancementResult enhancement) {
112        
113        DistributionMetadata metadata = DistributionMetadata.builder()
114            .title(transcription.getDetectedTitle())
115            .artist(transcription.getDetectedArtist())
116            .genre(analysis.getPrimaryGenre())
117            .subGenre(analysis.getSubGenres())
118            .culturalTags(analysis.getCulturalElements())
119            .language(transcription.getLanguage())
120            .isrc(generateISRC())
121            .build();
122            
123        List<Platform> platforms = selectPlatforms(analysis);
124        
125        return DistributionPackage.builder()
126            .audioId(enhancement.getEnhancedAudioId())
127            .metadata(metadata)
128            .platforms(platforms)
129            .releaseStrategy(ReleaseStrategy.WATERFALL)
130            .royaltySplits(calculateRoyalties())
131            .build();
132    }
133    
134    // Cleanup resources
135    public void shutdown() {
136        executor.shutdown();
137        try {
138            if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
139                executor.shutdownNow();
140            }
141        } catch (InterruptedException e) {
142            executor.shutdownNow();
143        }
144        client.close();
145    }
146}

SDKs & Tutorials

Official libraries and comprehensive tutorials for different programming languages

JS
JavaScript/TypeScript SDK
Full-featured SDK for browser and Node.js applications
npm install @jewelmusic/sdk
Py
Python SDK
Perfect for data science and machine learning workflows
pip install jewelmusic-python
Go
Go SDK
High-performance SDK for backend services and APIs
go get github.com/jewelmusic/go-sdk
Rb
Ruby SDK
Elegant SDK for Ruby and Rails applications
gem install jewelmusic
PHP
PHP SDK
Comprehensive SDK for PHP web applications
composer require jewelmusic/php-sdk
Java
Java SDK
Enterprise-ready SDK with Spring integration
Maven
<dependency>
  <groupId>com.jewelmusic</groupId>
  <artifactId>jewelmusic-sdk</artifactId>
  <version>2.0.0</version>
</dependency>

Advanced Integration Examples

Complex use cases and production-ready implementations

Webhook Event Processing
Handle real-time events from JewelMusic API
Webhook Handler (Node.js/Express)
1import express from 'express'
2import crypto from 'crypto'
3import { JewelMusicWebhook } from '@jewelmusic/sdk'
4
5const app = express()
6const webhook = new JewelMusicWebhook(process.env.WEBHOOK_SECRET)
7
8app.post('/webhooks/jewelmusic', express.raw({ type: 'application/json' }), async (req, res) => {
9  const signature = req.headers['x-jewelmusic-signature']
10  
11  try {
12    // Verify webhook signature
13    const event = webhook.verify(req.body, signature)
14    
15    // Process event based on type
16    switch (event.type) {
17      case 'transcription.completed':
18        await handleTranscriptionCompleted(event.data)
19        break
20        
21      case 'distribution.live':
22        await handleDistributionLive(event.data)
23        break
24        
25      case 'royalty.payment':
26        await handleRoyaltyPayment(event.data)
27        break
28        
29      case 'cultural.pattern.detected':
30        await handleCulturalPattern(event.data)
31        break
32        
33      default:
34        console.log('Unhandled event type:', event.type)
35    }
36    
37    res.json({ received: true })
38  } catch (err) {
39    console.error('Webhook error:', err)
40    res.status(400).json({ error: 'Invalid webhook' })
41  }
42})
43
44async function handleTranscriptionCompleted(data) {
45  // Process completed transcription
46  const { transcription_id, text, language, cultural_elements } = data
47  
48  // Store in database
49  await db.transcriptions.create({
50    id: transcription_id,
51    text,
52    language,
53    cultural_elements,
54    processed_at: new Date()
55  })
56  
57  // Trigger next workflow step
58  await processTranscription(transcription_id)
59}
60
61async function handleDistributionLive(data) {
62  // Handle when music goes live on platforms
63  const { release_id, platforms, urls } = data
64  
65  // Notify artist
66  await sendNotification({
67    type: 'release_live',
68    release_id,
69    platforms,
70    urls
71  })
72  
73  // Update analytics
74  await analytics.track('release.live', {
75    release_id,
76    platform_count: platforms.length
77  })
78}
Real-time Streaming Analysis
Process audio streams in real-time with WebSocket
Streaming Analysis (Python)
1import asyncio
2import websockets
3import json
4import numpy as np
5from jewelmusic import StreamingClient
6
7class RealtimeAudioProcessor:
8    def __init__(self, api_key):
9        self.client = StreamingClient(api_key)
10        self.buffer = []
11        self.sample_rate = 44100
12        self.chunk_size = 4096
13        
14    async def connect(self):
15        """Establish WebSocket connection for streaming"""
16        self.ws = await self.client.connect_stream(
17            mode='realtime',
18            sample_rate=self.sample_rate,
19            channels=1,
20            features=['transcription', 'pitch', 'cultural']
21        )
22        
23    async def process_audio_stream(self, audio_source):
24        """Process live audio stream"""
25        async for chunk in audio_source:
26            # Add to buffer
27            self.buffer.extend(chunk)
28            
29            # Process when buffer is full
30            if len(self.buffer) >= self.chunk_size:
31                await self.analyze_chunk(self.buffer[:self.chunk_size])
32                self.buffer = self.buffer[self.chunk_size:]
33                
34    async def analyze_chunk(self, audio_chunk):
35        """Send audio chunk for analysis"""
36        # Serialize samples as a hex string for transmission
37        audio_bytes = np.array(audio_chunk).astype(np.float32).tobytes()
38        
39        # Send to API
40        await self.ws.send(json.dumps({
41            'audio': audio_bytes.hex(),
42            'timestamp': asyncio.get_event_loop().time()
43        }))
44        
45        # Receive analysis results
46        response = await self.ws.recv()
47        result = json.loads(response)
48        
49        # Process results
50        if result.get('transcription'):
51            await self.handle_transcription(result['transcription'])
52            
53        if result.get('cultural_patterns'):
54            await self.handle_cultural_detection(result['cultural_patterns'])
55            
56        if result.get('pitch_data'):
57            await self.handle_pitch_analysis(result['pitch_data'])
58    
59    async def handle_transcription(self, transcription):
60        """Handle real-time transcription updates"""
61        print(f"Transcription: {transcription['text']}")
62        
63        # Check for keywords or commands
64        if 'stop' in transcription['text'].lower():
65            await self.stop_recording()
66            
67    async def handle_cultural_detection(self, patterns):
68        """Handle detected cultural patterns"""
69        for pattern in patterns:
70            if pattern['confidence'] > 0.8:
71                print(f"Detected: {pattern['type']} ({pattern['region']})")
72                
73                # Adapt processing based on cultural context
74                if pattern['type'] == 'georgian_polyphonic':
75                    await self.enable_polyphonic_mode()
76                    
77    async def handle_pitch_analysis(self, pitch_data):
78        """Process pitch detection results"""
79        fundamental = pitch_data.get('fundamental_frequency')
80        if fundamental:
81            # Convert to note
82            note = self.frequency_to_note(fundamental)
83            print(f"Current note: {note}")
84            
85    def frequency_to_note(self, freq):
86        """Convert frequency to musical note"""
87        A4 = 440
88        C0 = A4 * pow(2, -4.75)
89        
90        if freq > 0:
91            h = 12 * np.log2(freq / C0)
92            octave = int(h / 12)
93            n = int(h % 12)
94            notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
95            return f"{notes[n]}{octave}"
96        return None
97
98# Usage
99async def main():
100    processor = RealtimeAudioProcessor('your-api-key')
101    await processor.connect()
102    
103    # Get audio from microphone or stream
104    audio_source = get_audio_stream()  # Your audio input implementation
105    await processor.process_audio_stream(audio_source)
106
107asyncio.run(main())
Interactive API Playground
Test API endpoints directly in your browser with live examples

Try It Live

Interactive playground with sample audio files and real API responses

Ready to Build Something Amazing?

Get your API key and start integrating JewelMusic's AI-powered features today.