
G3 Learning Notes

  • 🍨 This article is a learning-record blog post for the 🔗 365-day deep learning training camp
  • 🍖 Original author: K同学啊

Preparation

import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torchvision.utils import save_image
from torchvision.utils import make_grid
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
import matplotlib.pyplot as plt
import datetime
torch.manual_seed(1)
<torch._C.Generator at 0x23c3bf41230>
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 128

1. Load the data

train_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_dataset = datasets.ImageFolder(root=r'C:\Users\11054\Desktop\kLearning\G3\rps', transform=train_transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,num_workers=6)
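A quick sanity check on the loader can save debugging time later. This is an optional sketch; the printed class names depend on the folder layout of the rps dataset:

# Optional sanity check: classes, dataset size, and one batch's shapes
print(train_dataset.classes)        # e.g. ['paper', 'rock', 'scissors'] (alphabetical folder order)
print(len(train_dataset))           # total number of training images
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)   # expected: torch.Size([128, 3, 128, 128]) torch.Size([128])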

2. Data visualization

def show_images(images):
    fig, ax = plt.subplots(figsize=(20, 20))
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(make_grid(images.detach(), nrow=22).permute(1, 2, 0))

def show_batch(dl):
    for images, _ in dl:
        show_images(images)
        break
show_batch(train_loader)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Got range [-0.92941177..1.0].

[Figure: grid of one training batch of rock/paper/scissors images]
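The clipping warning above appears because the tensors were normalized to [-1, 1] while imshow expects floats in [0, 1]. A minimal fix, assuming the mean/std of 0.5 used in the transform, is to denormalize before plotting:

def show_images_denorm(images):
    # Undo Normalize([0.5]*3, [0.5]*3): maps [-1, 1] back to [0, 1]
    images = images.detach() * 0.5 + 0.5
    fig, ax = plt.subplots(figsize=(20, 20))
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(make_grid(images, nrow=22).permute(1, 2, 0))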

image_shape = (3, 128, 128)
image_dim = int(np.prod(image_shape))
latent_dim = 100
n_classes = 3
embedding_dim = 100

Build the model

# Custom weight-initialization function for the generator and discriminator
def weights_init(m):
    # Get the class name of the current layer
    classname = m.__class__.__name__
    # Convolutional layers (class name contains 'Conv'; this also matches ConvTranspose2d)
    if classname.find('Conv') != -1:
        # Initialize weights from a normal distribution with mean 0, std 0.02
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
    # Batch-normalization layers (class name contains 'BatchNorm')
    elif classname.find('BatchNorm') != -1:
        # Initialize weights from a normal distribution with mean 1, std 0.02
        torch.nn.init.normal_(m.weight, 1.0, 0.02)
        # Initialize biases to zero
        torch.nn.init.zeros_(m.bias)

1. Build the generator

class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        # Conditional-label branch: maps a class label into the embedding space
        # n_classes: number of condition labels
        # embedding_dim: dimensionality of the embedding space
        self.label_conditioned_generator = nn.Sequential(
            nn.Embedding(n_classes, embedding_dim),  # embed the label as a dense vector
            nn.Linear(embedding_dim, 16)             # project to 16 values (reshaped to 1x4x4 below)
        )

        # Latent branch: maps the noise vector toward image space
        # latent_dim: dimensionality of the latent vector
        self.latent = nn.Sequential(
            nn.Linear(latent_dim, 4*4*512),   # project the latent vector to a higher dimension
            nn.LeakyReLU(0.2, inplace=True)   # non-linear mapping
        )

        # Main generator: fuses label and latent features into the generated image
        self.model = nn.Sequential(
            # Transposed conv 1: 513x4x4 -> 512x8x8
            nn.ConvTranspose2d(513, 64*8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*8, momentum=0.1, eps=0.8),  # batch normalization
            nn.ReLU(True),  # ReLU activation
            # Transposed conv 2: 512x8x8 -> 256x16x16
            nn.ConvTranspose2d(64*8, 64*4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*4, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            # Transposed conv 3: 256x16x16 -> 128x32x32
            nn.ConvTranspose2d(64*4, 64*2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*2, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            # Transposed conv 4: 128x32x32 -> 64x64x64
            nn.ConvTranspose2d(64*2, 64*1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*1, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            # Transposed conv 5: 64x64x64 -> 3x128x128 RGB image
            nn.ConvTranspose2d(64*1, 3, 4, 2, 1, bias=False),
            nn.Tanh()  # map pixel values into [-1, 1]
        )

    def forward(self, inputs):
        noise_vector, label = inputs
        # Embed the condition label
        label_output = self.label_conditioned_generator(label)
        # Reshape to (batch_size, 1, 4, 4) so it can be fused with the latent features
        label_output = label_output.view(-1, 1, 4, 4)
        # Map the noise vector to latent features
        latent_output = self.latent(noise_vector)
        # Reshape to (batch_size, 512, 4, 4)
        latent_output = latent_output.view(-1, 512, 4, 4)
        # Concatenate latent and label features along the channel dimension
        concat = torch.cat((latent_output, label_output), dim=1)
        # Generate the RGB image from the fused feature map
        image = self.model(concat)
        return image
generator = Generator().to(device)
generator.apply(weights_init)
print(generator)
Generator(
  (label_conditioned_generator): Sequential(
    (0): Embedding(3, 100)
    (1): Linear(in_features=100, out_features=16, bias=True)
  )
  (latent): Sequential(
    (0): Linear(in_features=100, out_features=8192, bias=True)
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (model): Sequential(
    (0): ConvTranspose2d(513, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (1): BatchNorm2d(512, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (4): BatchNorm2d(256, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (5): ReLU(inplace=True)
    (6): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (7): BatchNorm2d(128, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (8): ReLU(inplace=True)
    (9): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (10): BatchNorm2d(64, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (11): ReLU(inplace=True)
    (12): ConvTranspose2d(64, 3, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (13): Tanh()
  )
)
from torchinfo import summary
summary(generator)
=================================================================
Layer (type:depth-idx)                   Param #
=================================================================
Generator                                --
├─Sequential: 1-1                        --
│    └─Embedding: 2-1                    300
│    └─Linear: 2-2                       1,616
├─Sequential: 1-2                        --
│    └─Linear: 2-3                       827,392
│    └─LeakyReLU: 2-4                    --
├─Sequential: 1-3                        --
│    └─ConvTranspose2d: 2-5              4,202,496
│    └─BatchNorm2d: 2-6                  1,024
│    └─ReLU: 2-7                         --
│    └─ConvTranspose2d: 2-8              2,097,152
│    └─BatchNorm2d: 2-9                  512
│    └─ReLU: 2-10                        --
│    └─ConvTranspose2d: 2-11             524,288
│    └─BatchNorm2d: 2-12                 256
│    └─ReLU: 2-13                        --
│    └─ConvTranspose2d: 2-14             131,072
│    └─BatchNorm2d: 2-15                 128
│    └─ReLU: 2-16                        --
│    └─ConvTranspose2d: 2-17             3,072
│    └─Tanh: 2-18                        --
=================================================================
Total params: 7,789,308
Trainable params: 7,789,308
Non-trainable params: 0
=================================================================
a = torch.ones(100)
b = torch.ones(1)
b = b.long()
a = a.to(device)
b = b.to(device)
# generator((a,b))
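The commented-out call above can be run as a quick shape check. A minimal sketch, assuming the model is still on `device`: one length-100 noise vector plus one label index should yield a single 3x128x128 image.

# Quick shape check (a sketch): one noise vector + one label -> one image
generator.eval()            # use running BN statistics for this single-sample check
with torch.no_grad():
    out = generator((a, b))
print(out.shape)            # expected: torch.Size([1, 3, 128, 128])
generator.train()           # switch back to training mode afterwards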

2. Build the discriminator

import torch
import torch.nn as nn

class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        # Conditional-label branch: embeds the class label and expands it to image size
        self.label_condition_disc = nn.Sequential(
            nn.Embedding(n_classes, embedding_dim),  # encode the label as a fixed-length vector
            nn.Linear(embedding_dim, 3*128*128)      # expand the embedding to match the image dimensions
        )

        # Main discriminator
        self.model = nn.Sequential(
            nn.Conv2d(6, 64, 4, 2, 1, bias=False),        # 6 input channels (image + label map), 4x4 kernel, stride 2, padding 1
            nn.LeakyReLU(0.2, inplace=True),              # LeakyReLU keeps a small gradient for negative inputs
            nn.Conv2d(64, 64*2, 4, 3, 2, bias=False),     # 64 -> 128 channels, 4x4 kernel, stride 3, padding 2
            nn.BatchNorm2d(64*2, momentum=0.1, eps=0.8),  # batch norm for training stability and faster convergence
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64*2, 64*4, 4, 3, 2, bias=False),   # 128 -> 256 channels, 4x4 kernel, stride 3, padding 2
            nn.BatchNorm2d(64*4, momentum=0.1, eps=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64*4, 64*8, 4, 3, 2, bias=False),   # 256 -> 512 channels, 4x4 kernel, stride 3, padding 2
            nn.BatchNorm2d(64*8, momentum=0.1, eps=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Flatten(),                                 # flatten the 512x3x3 feature map into a vector
            nn.Dropout(0.4),                              # dropout to reduce overfitting
            nn.Linear(4608, 1),                           # fully connected: 512*3*3 = 4608 features -> 1 value
            nn.Sigmoid()                                  # squash the output to a probability in (0, 1)
        )

    def forward(self, inputs):
        img, label = inputs
        # Embed the class label and expand it
        label_output = self.label_condition_disc(label)
        # Reshape into a 3x128x128 pseudo-image matching the input size
        label_output = label_output.view(-1, 3, 128, 128)
        # Concatenate image and label map along the channel dimension
        concat = torch.cat((img, label_output), dim=1)
        # Forward pass through the discriminator
        output = self.model(concat)
        return output
discriminator = Discriminator().to(device)
discriminator.apply(weights_init)
print(discriminator)
Discriminator(
  (label_condition_disc): Sequential(
    (0): Embedding(3, 100)
    (1): Linear(in_features=100, out_features=49152, bias=True)
  )
  (model): Sequential(
    (0): Conv2d(6, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
    (2): Conv2d(64, 128, kernel_size=(4, 4), stride=(3, 3), padding=(2, 2), bias=False)
    (3): BatchNorm2d(128, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (4): LeakyReLU(negative_slope=0.2, inplace=True)
    (5): Conv2d(128, 256, kernel_size=(4, 4), stride=(3, 3), padding=(2, 2), bias=False)
    (6): BatchNorm2d(256, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (7): LeakyReLU(negative_slope=0.2, inplace=True)
    (8): Conv2d(256, 512, kernel_size=(4, 4), stride=(3, 3), padding=(2, 2), bias=False)
    (9): BatchNorm2d(512, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (10): LeakyReLU(negative_slope=0.2, inplace=True)
    (11): Flatten(start_dim=1, end_dim=-1)
    (12): Dropout(p=0.4, inplace=False)
    (13): Linear(in_features=4608, out_features=1, bias=True)
    (14): Sigmoid()
  )
)
summary(discriminator)
=================================================================
Layer (type:depth-idx)                   Param #
=================================================================
Discriminator                            --
├─Sequential: 1-1                        --
│    └─Embedding: 2-1                    300
│    └─Linear: 2-2                       4,964,352
├─Sequential: 1-2                        --
│    └─Conv2d: 2-3                       6,144
│    └─LeakyReLU: 2-4                    --
│    └─Conv2d: 2-5                       131,072
│    └─BatchNorm2d: 2-6                  256
│    └─LeakyReLU: 2-7                    --
│    └─Conv2d: 2-8                       524,288
│    └─BatchNorm2d: 2-9                  512
│    └─LeakyReLU: 2-10                   --
│    └─Conv2d: 2-11                      2,097,152
│    └─BatchNorm2d: 2-12                 1,024
│    └─LeakyReLU: 2-13                   --
│    └─Flatten: 2-14                     --
│    └─Dropout: 2-15                     --
│    └─Linear: 2-16                      4,609
│    └─Sigmoid: 2-17                     --
=================================================================
Total params: 7,729,709
Trainable params: 7,729,709
Non-trainable params: 0
=================================================================
a = torch.ones(2,3,128,128)
b = torch.ones(2,1)
b = b.long()
a = a.to(device)
b = b.to(device)
c = discriminator((a,b))
c.size()
torch.Size([2, 1])

Train the model

1. Define the loss functions

adversarial_loss = nn.BCELoss()

def generator_loss(fake_output, label):
    gen_loss = adversarial_loss(fake_output, label)
    return gen_loss

def discriminator_loss(output, label):
    disc_loss = adversarial_loss(output, label)
    return disc_loss
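Both helpers wrap the same binary cross-entropy. As a quick numeric illustration (hypothetical values, not from the training run): a confident correct prediction costs -ln(0.9) ≈ 0.105, a confident wrong one -ln(0.1) ≈ 2.303.

# Hypothetical sanity check of the BCE helpers (values chosen for illustration)
pred = torch.tensor([[0.9]])
print(discriminator_loss(pred, torch.ones(1, 1)))   # -ln(0.9) ≈ 0.105 (confident and correct)
print(discriminator_loss(pred, torch.zeros(1, 1)))  # -ln(0.1) ≈ 2.303 (confident and wrong)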

2. Define the optimizers

learning_rate = 0.0002
G_optimizer = optim.Adam(generator.parameters(),     lr = learning_rate, betas=(0.5, 0.999))
D_optimizer = optim.Adam(discriminator.parameters(), lr = learning_rate, betas=(0.5, 0.999))

3. Train the model

# Total number of training epochs
num_epochs = 20
# Lists storing the per-epoch discriminator and generator losses
D_loss_plot, G_loss_plot = [], []

# Training loop
for epoch in range(1, num_epochs + 1):

    # Per-epoch loss buffers
    D_loss_list, G_loss_list = [], []

    # Iterate over the training data loader
    for index, (real_images, labels) in enumerate(train_loader):
        # Clear the discriminator's gradient cache
        D_optimizer.zero_grad()
        # Move real images and labels to the GPU (if available)
        real_images = real_images.to(device)
        labels      = labels.to(device)
        # Reshape labels from (batch,) to (batch, 1) for the downstream computations
        labels = labels.unsqueeze(1).long()

        # Real and fake targets for the discriminator loss
        real_target = Variable(torch.ones(real_images.size(0), 1).to(device))
        fake_target = Variable(torch.zeros(real_images.size(0), 1).to(device))

        # Discriminator loss on real images
        D_real_loss = discriminator_loss(discriminator((real_images, labels)), real_target)

        # Generate fake images from a noise vector (the generator's input)
        noise_vector = torch.randn(real_images.size(0), latent_dim, device=device)
        noise_vector = noise_vector.to(device)
        generated_image = generator((noise_vector, labels))

        # Discriminator loss on fake images (detach() cuts the generator out of this graph)
        output = discriminator((generated_image.detach(), labels))
        D_fake_loss = discriminator_loss(output, fake_target)

        # Total discriminator loss: average of the real and fake losses
        D_total_loss = (D_real_loss + D_fake_loss) / 2
        D_loss_list.append(D_total_loss)

        # Backpropagate and update the discriminator
        D_total_loss.backward()
        D_optimizer.step()

        # Clear the generator's gradient cache
        G_optimizer.zero_grad()

        # Generator loss: fool the discriminator into predicting "real"
        G_loss = generator_loss(discriminator((generated_image, labels)), real_target)
        G_loss_list.append(G_loss)

        # Backpropagate and update the generator
        G_loss.backward()
        G_optimizer.step()

    # Print the epoch's average discriminator and generator losses
    print('Epoch: [%d/%d]: D_loss: %.3f, G_loss: %.3f' % (
            (epoch), num_epochs,
            torch.mean(torch.FloatTensor(D_loss_list)),
            torch.mean(torch.FloatTensor(G_loss_list))))

    # Record the epoch averages
    D_loss_plot.append(torch.mean(torch.FloatTensor(D_loss_list)))
    G_loss_plot.append(torch.mean(torch.FloatTensor(G_loss_list)))

    if epoch % 10 == 0:
        # Save a grid of generated images
        save_image(generated_image.data[:50],
                   f'C:/Users/11054/Desktop/kLearning/G3/images/sample_{epoch}.png',
                   nrow=5, normalize=True)
        # Save generator and discriminator checkpoints
        torch.save(generator.state_dict(), f'C:/Users/11054/Desktop/kLearning/G3/generator_epoch_{epoch}.pth')
        torch.save(discriminator.state_dict(), f'C:/Users/11054/Desktop/kLearning/G3/discriminator_epoch_{epoch}.pth')
Epoch: [1/20]: D_loss: 0.328, G_loss: 1.420
Epoch: [2/20]: D_loss: 0.169, G_loss: 2.818
Epoch: [3/20]: D_loss: 0.261, G_loss: 2.547
Epoch: [4/20]: D_loss: 0.293, G_loss: 2.359
Epoch: [5/20]: D_loss: 0.301, G_loss: 2.415
Epoch: [6/20]: D_loss: 0.348, G_loss: 2.198
Epoch: [7/20]: D_loss: 0.346, G_loss: 2.292
Epoch: [8/20]: D_loss: 0.425, G_loss: 2.117
Epoch: [9/20]: D_loss: 0.470, G_loss: 1.596
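The per-epoch averages stored in D_loss_plot and G_loss_plot are never visualized above. A minimal plotting sketch, run after training finishes (note the list elements are 0-dim tensors, hence the float() conversion):

# Visualize the recorded loss curves (a minimal sketch)
d_hist = [float(t) for t in D_loss_plot]
g_hist = [float(t) for t in G_loss_plot]
plt.figure(figsize=(8, 4))
plt.plot(range(1, len(d_hist) + 1), d_hist, label='Discriminator loss')
plt.plot(range(1, len(g_hist) + 1), g_hist, label='Generator loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()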

Model analysis

1. Load the model

generator.load_state_dict(torch.load(f'C:/Users/11054/Desktop/kLearning/G3/generator_epoch_10.pth'), strict=False)
generator.eval()               
Generator(
  (label_conditioned_generator): Sequential(
    (0): Embedding(3, 100)
    (1): Linear(in_features=100, out_features=16, bias=True)
  )
  (latent): Sequential(
    (0): Linear(in_features=100, out_features=8192, bias=True)
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (model): Sequential(
    (0): ConvTranspose2d(513, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (1): BatchNorm2d(512, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (4): BatchNorm2d(256, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (5): ReLU(inplace=True)
    (6): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (7): BatchNorm2d(128, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (8): ReLU(inplace=True)
    (9): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (10): BatchNorm2d(64, eps=0.8, momentum=0.1, affine=True, track_running_stats=True)
    (11): ReLU(inplace=True)
    (12): ConvTranspose2d(64, 3, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (13): Tanh()
  )
)
# Import the required libraries
from numpy import asarray
from numpy.random import randn
from numpy.random import randint
from numpy import linspace
from matplotlib import pyplot
from matplotlib import gridspec

# Sample points in latent space to use as generator inputs
def generate_latent_points(latent_dim, n_samples, n_classes=3):
    # Draw points from a standard normal distribution
    x_input = randn(latent_dim * n_samples)
    # Reshape into a batch of network inputs
    z_input = x_input.reshape(n_samples, latent_dim)
    return z_input

# Uniform interpolation between two latent-space points
def interpolate_points(p1, p2, n_steps=10):
    # Interpolation ratios between the two points
    ratios = linspace(0, 1, num=n_steps)
    # Linearly interpolated vectors
    vectors = list()
    for ratio in ratios:
        v = (1.0 - ratio) * p1 + ratio * p2
        vectors.append(v)
    return asarray(vectors)

# Sample two latent-space points
pts = generate_latent_points(100, 2)
# Interpolate between the two points
interpolated = interpolate_points(pts[0], pts[1])

# Convert to a torch tensor and move it to the GPU (device was declared earlier)
interpolated = torch.tensor(interpolated).to(device).type(torch.float32)
output = None
# For each of the three classes, run the interpolated latents through the generator
for label in range(3):
    # A tensor of ten identical class labels
    labels = torch.ones(10) * label
    labels = labels.to(device)
    labels = labels.unsqueeze(1).long()
    print(labels.size())
    # Generate the interpolation sequence
    predictions = generator((interpolated, labels))
    predictions = predictions.permute(0, 2, 3, 1)
    pred = predictions.detach().cpu()
    if output is None:
        output = pred
    else:
        output = np.concatenate((output, pred))
torch.Size([10, 1])
torch.Size([10, 1])
torch.Size([10, 1])
output.shape
(30, 128, 128, 3)
nrow = 3
ncol = 10

fig = plt.figure(figsize=(15, 4))
gs = gridspec.GridSpec(nrow, ncol)

k = 0
for i in range(nrow):
    for j in range(ncol):
        pred = (output[k, :, :, :] + 1) * 127.5
        pred = np.array(pred)
        ax = plt.subplot(gs[i, j])
        ax.imshow(pred.astype(np.uint8))
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.axis('off')
        k += 1
plt.show()

[Figure: 3x10 grid of generated images, one row per class, interpolating between two latent points]
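Besides interpolation, the loaded generator can also produce fresh class-conditional samples from random noise. A minimal sketch (n_per_class and the output filenames are arbitrary choices for illustration):

# Generate fresh samples for each class from random noise (a minimal sketch)
n_per_class = 5  # arbitrary number of samples per class
with torch.no_grad():
    for cls in range(n_classes):
        z = torch.randn(n_per_class, latent_dim, device=device)
        y = torch.full((n_per_class, 1), cls, dtype=torch.long, device=device)
        imgs = generator((z, y))                 # (n_per_class, 3, 128, 128), values in [-1, 1]
        save_image(imgs, f'class_{cls}_samples.png', nrow=n_per_class, normalize=True)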

Personal summary

A CGAN introduces conditional information to control the class or attributes of the generated data, improving both controllability and diversity. Analysis of the core components (a minimal sketch of the conditioning mechanism follows this list):

  1. Conditioning mechanism:

    • An Embedding layer maps discrete labels to continuous vectors (label_conditioned_generator / label_condition_disc)
    • A Linear layer adjusts the dimensionality before concatenation with the image features (the concat operation in forward)
  2. Key design points of the Generator:

    • Input: noise vector (latent_dim) + class label (n_classes)
    • Structure: five transposed convolutions (ConvTranspose2d) for upsampling
    • Output: 3-channel RGB image (Tanh normalizes it to [-1, 1])
    • Notable detail: the label embedding is reshaped into a 4x4 feature map and concatenated with the noise features
  3. Key design points of the Discriminator:

    • Input: real/generated image + matching label features
    • Structure: four convolutions (Conv2d) for downsampling
    • Output: a single Sigmoid-activated probability
    • Notable detail: the label features are first expanded into a 3x128x128 pseudo-image before concatenation
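As referenced above, here is a minimal, self-contained sketch of the conditioning idea, using the same shapes as the generator branch (a toy illustration, not the notebook's exact modules):

import torch
import torch.nn as nn

# Toy illustration of CGAN conditioning: embed a label, reshape it into a
# feature map, and concatenate it with the latent feature map channel-wise.
emb = nn.Sequential(nn.Embedding(3, 100), nn.Linear(100, 16))

label = torch.tensor([2])                 # one label in {0, 1, 2}
label_map = emb(label).view(-1, 1, 4, 4)  # (1, 1, 4, 4) "label channel"
latent_map = torch.randn(1, 512, 4, 4)    # (1, 512, 4, 4) latent features

fused = torch.cat((latent_map, label_map), dim=1)
print(fused.shape)                        # torch.Size([1, 513, 4, 4]) -> the 513 input channels above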

Observations on training technique:

  1. Parameter configuration:

    • LeakyReLU(0.2) mitigates vanishing gradients
    • The BatchNorm settings momentum=0.1, eps=0.8 are unconventional (eps is usually around 1e-5)
    • Dropout(0.4) keeps the discriminator from overfitting
  2. Dimension bookkeeping:

    • The generator's final output is a 3x128x128 image, matching the discriminator's input size
    • After Flatten, the discriminator's fully connected layer takes 4608 input features, which must be computed from the input size (see the sketch after this list)
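As referenced above, a small sketch deriving the 4608 figure from the standard conv output formula, floor((H + 2p - k) / s) + 1, applied to the discriminator's layers:

# Trace the discriminator's spatial sizes to recover the Linear input (4608)
def conv_out(size, k, s, p):
    return (size + 2 * p - k) // s + 1

size, channels = 128, 6
for (c_out, k, s, p) in [(64, 4, 2, 1), (128, 4, 3, 2), (256, 4, 3, 2), (512, 4, 3, 2)]:
    size = conv_out(size, k, s, p)
    channels = c_out
    print(f'{channels} x {size} x {size}')   # 64x64x64, 128x22x22, 256x8x8, 512x3x3
print('Flatten ->', channels * size * size)  # 512 * 3 * 3 = 4608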
